| code (stringlengths 2–1.05M) | repo_name (stringlengths 5–110) | path (stringlengths 3–922) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 2–1.05M) |
---|---|---|---|---|---|
curl http://downloads.drone.io/release/linux/amd64/drone.tar.gz | tar zx
sudo install -t /usr/local/sbin drone
rm -f drone
BASH_PROFILE=/home/vagrant/.bash_profile
echo "export DRONE_SERVER=" >> $BASH_PROFILE
echo "export DRONE_TOKEN=" >> $BASH_PROFILE
| CiscoCloud/vaquero-vagrant | provision_scripts/drone.sh | Shell | apache-2.0 | 255 |
#!/usr/bin/env bash
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This bash library includes various I/O functions, including logging functions.
#
# Example:
#
# source module /ci/lib/io.sh
# io::log_yellow "Hello world"
# Make our include guard clean against set -o nounset.
test -n "${CI_LIB_IO_SH__:-}" || declare -i CI_LIB_IO_SH__=0
if ((CI_LIB_IO_SH__++ != 0)); then
return 0
fi # include guard
# Callers may use these IO_* variables directly, but should prefer to use the
# logging functions below instead. For example, prefer `io::log_green "..."`
# over `echo "${IO_COLOR_GREEN}...${IO_RESET}"`.
if [ -t 0 ] && command -v tput >/dev/null; then
IO_BOLD="$(tput bold)"
IO_COLOR_RED="$(tput setaf 1)"
IO_COLOR_GREEN="$(tput setaf 2)"
IO_COLOR_YELLOW="$(tput setaf 3)"
IO_RESET="$(tput sgr0)"
else
IO_BOLD=""
IO_COLOR_RED=""
IO_COLOR_GREEN=""
IO_COLOR_YELLOW=""
IO_RESET=""
fi
readonly IO_BOLD
readonly IO_COLOR_RED
readonly IO_COLOR_GREEN
readonly IO_COLOR_YELLOW
readonly IO_RESET
export CI_LIB_IO_FIRST_TIMESTAMP=${CI_LIB_IO_FIRST_TIMESTAMP:-$(date '+%s')}
# Prints the current time as a string.
function io::internal::timestamp() {
local now
now=$(date '+%s')
local when=(-d "@${now}")
case "$(uname -s)" in
Darwin) when=(-r "${now}") ;;
esac
echo "$(date "${when[@]}" -u '+%Y-%m-%dT%H:%M:%SZ')" \
"$(printf '(%+ds)' $((now - CI_LIB_IO_FIRST_TIMESTAMP)))"
}
# Logs a message using the given terminal capability. The first argument
# must be one of the IO_* variables defined above, such as "${IO_COLOR_RED}".
# The remaining arguments will be logged using the given capability. The
# log message will also have an RFC-3339 timestamp prepended (in UTC).
function io::internal::log_impl() {
local termcap="$1"
shift
local timestamp
timestamp="$(io::internal::timestamp)"
echo "${termcap}${timestamp}: $*${IO_RESET}"
}
# Logs the given message with normal coloring and a timestamp.
function io::log() {
io::internal::log_impl "${IO_RESET}" "$@"
}
# Logs the given message in green with a timestamp.
function io::log_green() {
io::internal::log_impl "${IO_COLOR_GREEN}" "$@"
}
# Logs the given message in yellow with a timestamp.
function io::log_yellow() {
io::internal::log_impl "${IO_COLOR_YELLOW}" "$@"
}
# Logs the given message in red with a timestamp.
function io::log_red() {
io::internal::log_impl "${IO_COLOR_RED}" "$@"
}
# Logs the given message in bold with a timestamp.
function io::log_bold() {
io::internal::log_impl "${IO_BOLD}" "$@"
}
# Logs the arguments, in bold with a timestamp, and then executes them.
# This is like executing a command under "set -x" in the shell (including
# the ${PS4} prefix).
function io::run() {
local cmd
cmd="$(printf ' %q' "$@")"
io::log_bold "${PS4}${cmd# }"
"$@"
}
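# A minimal usage sketch (illustrative command and bucket name; assumes the
# default "+ " PS4 prompt):
#
#   io::run gsutil ls "gs://my-bucket"
#
# first logs, in bold and with a timestamp, "+ gsutil ls gs://my-bucket"
# and then executes the command itself.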
# Logs an "H1" heading. This looks like a blank line and the current time,
# followed by the message in a double-lined box.
#
# 2021-06-04T17:16:00Z
# ========================================
# | This is an example of io::log_h1 |
# ========================================
function io::log_h1() {
local timestamp
timestamp="$(io::internal::timestamp)"
local msg="| $* |"
local line
line="$(printf -- "=%.0s" $(seq 1 ${#msg}))"
printf "\n%s\n%s\n%s\n%s\n" "${timestamp}" "${line}" "${msg}" "${line}"
}
# Logs an "H2" heading. Same as H1, but uses a single-lined box.
#
# 2021-06-04T17:16:00Z
# ----------------------------------------
# | This is an example of io::log_h2 |
# ----------------------------------------
function io::log_h2() {
local timestamp
timestamp="$(io::internal::timestamp)"
local msg="| $* |"
local line
line="$(printf -- "-%.0s" $(seq 1 ${#msg}))"
printf "\n%s\n%s\n%s\n%s\n" "${timestamp}" "${line}" "${msg}" "${line}"
}
| googleapis/google-cloud-cpp | ci/lib/io.sh | Shell | apache-2.0 | 4,325 |
#!/bin/bash
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
data_name=Freebase
data_folder=$HOME/data/knowledge_graphs/$data_name
eval_path=$data_folder/eval-miss-100-neg-1000
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
python ../main_train.py --do_train --do_valid --gpus '0.1.2.3.4.5.6.7' \
--data_path $data_folder --eval_path $eval_path \
-n 1024 -b 512 -d 400 -g 2 \
-a 0.5 -adv \
-lr 0.00005 --max_steps 2000001 --geo vec --valid_steps 20000 \
--tasks '1p.2p.3p.2i.3i.ip.pi' --training_tasks '1p.2p.3p.2i.3i.ip.pi' \
--filter_test \
--sampler_type nosearch \
--share_negative \
--save_checkpoint_steps 2000000 \
--share_optim_stats \
--online_sample --prefix '../logs' --online_sample_mode '(10000,0,u,u,0)' \
--train_online_mode '(single,3000,e,True,before)' --optim_mode '(aggr,adam,cpu,True,5)' --online_weighted_structure_prob '(1,1,1,1,1,1,1)' --print_on_screen \
--port 29500
| google-research/smore | smore/training/vec_scripts/train_freebase.sh | Shell | apache-2.0 | 1,433 |
#!/bin/bash
#######
# This file contains instructions to build the demo environment
# from source
#
# - as root,
# - within a bare Ubuntu 14.04 docker image
#
# 1. To launch into the bare image, use
# sudo docker run -p 3000:3000 -p 3350:3350 -p 3111:3111 -p 8080:8080 -it ubuntu:14.04 /bin/bash
#
# 2. Copy and paste this entire file into the prompt
#
# 3. Run one by one all the update functions except update_all
# (or run only update_all)
# This step requires downloading around 500M, and
# some pretty heavy compilation.
#
# 4. Optional. Save your work so steps 1-3 need not be repeated:
# - exit the image: execute 'exit'
# - save the image: execute 'sudo docker commit <id> <name>'
# (use 'sudo docker ps -a' to find its <id>)
# - re-start the image:
# sudo docker run -p 3000:3000 -p 3350:3350 -p 3111:3111 -p 8080:8080 -it <name> /bin/bash
#
# 5. Launch supporting servers
# - launch_redis && launch_mongo && launch_el
# - launch_zookeeper
# - launch_kafka
# - launch_storm
# - launch_openlrs
#
# 6. Launch WP2 servers, one by one
# - launch_openlrs
# - launch_test_users
# - launch_lrs
# - launch_gf
# - launch_emo
#
export MAVEN_VERSION="3.3.3"
export NODE_NUM_VERSION="v0.12.7"
export NODE_VERSION="node-v0.12.7-linux-x64"
export REDIS_VERSION="redis-3.0.4"
export EL_VERSION="elasticsearch-1.7.1"
export STORM_VERSION="apache-storm-0.9.5"
export ZOOKEEPER_VERSION="zookeeper-3.4.6"
export KAFKA_NUM_VERSION="0.8.2.1"
export KAFKA_VERSION="kafka_2.10-0.8.2.1"
export PATH_TO_GLEANER_REALTIME_JAR="/opt/gleaner-realtime/target/realtime-jar-with-dependencies.jar"
export PATH_TO_L_I_SPACE_WEBAPP="/opt/lostinspace/html/target/webapp"
# used to download sources, executables
function update_tools {
apt-get update && apt-get install -y nano git wget gcc g++ make openjdk-7-jdk
cd /opt
wget http://apache.rediris.es/maven/maven-3/${MAVEN_VERSION}/binaries/apache-maven-${MAVEN_VERSION}-bin.tar.gz
tar -xvzf apache-maven-${MAVEN_VERSION}-bin.tar.gz
cd /
ln -sf /opt/apache-maven-${MAVEN_VERSION}/bin/mvn /usr/local/bin
}
function update_with_git {
cd /opt
git clone https://github.com/$1/$2
sleep 1s
cd $2
git fetch origin $3
git pull origin $3
sleep 1s
}
function update_node {
cd /tmp
wget https://nodejs.org/dist/${NODE_NUM_VERSION}/${NODE_VERSION}.tar.gz
cd /opt
tar -xvzf /tmp/${NODE_VERSION}.tar.gz
cd /
ln -sf /opt/${NODE_VERSION}/bin/* /usr/local/bin
npm install -g bower
}
function scriptify { # name dir commands...
TARGET=/opt/${1}.sh
shift
cd /opt
echo "#! /bin/bash" > $TARGET
echo cd $1 >> $TARGET
shift
echo "$@" >> $TARGET
cd /opt
chmod 0755 $TARGET
}
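# Usage sketch (illustrative): `scriptify lrs lrs npm start` writes /opt/lrs.sh
# containing "cd lrs" followed by "npm start" and marks it executable; the
# launch_node helper below later runs such scripts as ./<name>.sh from /opt.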
function update_mongo {
# mongo via apt; see http://docs.mongodb.org/master/tutorial/install-mongodb-on-ubuntu/
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
echo "deb http://repo.mongodb.org/apt/ubuntu trusty/mongodb-org/3.0 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-3.0.list
apt-get update
apt-get install -y mongodb-org
}
function update_redis {
cd /opt
wget http://download.redis.io/releases/${REDIS_VERSION}.tar.gz
tar xvzf ${REDIS_VERSION}.tar.gz
cd ${REDIS_VERSION}
make
ln -sf /opt/${REDIS_VERSION}/src/redis-server /usr/local/bin
}
function update_el {
cd /opt
wget https://download.elastic.co/elasticsearch/elasticsearch/${EL_VERSION}.deb
dpkg -i ${EL_VERSION}.deb
}
function update_storm {
cd /opt
wget http://apache.rediris.es/storm/${STORM_VERSION}/${STORM_VERSION}.tar.gz
tar -xvzf ${STORM_VERSION}.tar.gz
cd ${STORM_VERSION}/conf
echo "ui.port: 8081" >> storm.yaml
cd /
ln -sf /opt/${STORM_VERSION}/bin/storm /usr/local/bin
}
function update_zookeeper {
cd /opt
wget http://apache.rediris.es/zookeeper/${ZOOKEEPER_VERSION}/${ZOOKEEPER_VERSION}.tar.gz
tar -xvzf ${ZOOKEEPER_VERSION}.tar.gz
cd /
ln -sf /opt/${ZOOKEEPER_VERSION}/bin/zk*.sh /usr/local/bin
cd /opt/${ZOOKEEPER_VERSION}/conf/
cp zoo_sample.cfg zoo.cfg
}
function update_kafka {
cd /opt
wget http://apache.rediris.es/kafka/${KAFKA_NUM_VERSION}/${KAFKA_VERSION}.tgz
tar -xvzf ${KAFKA_VERSION}.tgz
cd /
ln -sf /opt/${KAFKA_VERSION}/bin/*.sh /usr/local/bin
}
function update_gleaner_realtime { # updates .m2 cache
update_with_git RotaruDan gleaner-realtime toledo-09-15
cd /opt/gleaner-realtime
mvn clean install
}
function update_openlrs {
update_with_git RotaruDan OpenLRS toledo-09-15
}
# updates .m2 cache; SLOW
function update_lostinspace {
update_with_git RotaruDan lostinspace toledo-09-15
update_with_git e-ucm xmltools master
cd /opt/xmltools
mvn clean install
cd /opt/lostinspace
mvn clean install -Phtml
}
function update_test_users {
update_with_git RotaruDan test-users toledo-09-15
npm install
npm run fast-setup
npm run gen-apidoc
# npm test # requires redis, mongo running
scriptify test-users test-users npm start
}
# depends: gleaner-realtime
function update_lrs {
update_with_git RotaruDan lrs toledo-09-15
cd /opt/lrs
echo "exports.defaultValues.realtimeJar='${PATH_TO_GLEANER_REALTIME_JAR}';" >> config-values.js
echo "exports.defaultValues.stormPath='/opt/${STORM_VERSION}/bin';" >> config-values.js
npm install
npm run fast-setup
npm run gen-apidoc
# npm test # requires redis, mongo running
scriptify lrs lrs npm start
}
# depends: lost-in-space
function update_gf {
update_with_git gorco gf toledo-09-15
cd /opt/gf
npm install
bower --allow-root install
npm run fast-setup
mkdir app
mkdir app/public
rm -rf app/public/lostinspace
cp -r ${PATH_TO_L_I_SPACE_WEBAPP} app/public/lostinspace
cd app/public/
wget https://dl.dropboxusercontent.com/u/3300634/inboxed.tar.gz
tar -xvzf inboxed.tar.gz
mv webapp inboxed
scriptify gf gf npm start
}
# front and back-ends for emotions
function update_emo {
update_with_git gorco emoB master
cd /opt/emoB
npm install
scriptify emoB emoB npm start
update_with_git gorco emoF master
cd /opt/emoF
npm install
scriptify emoF emoF npm start
}
function update_all {
update_tools
update_node
update_mongo
update_redis
update_el
update_storm
update_zookeeper
update_kafka
update_gleaner_realtime
update_openlrs
update_lostinspace
update_test_users
update_lrs
update_gf
update_emo
}
function get_pids { # $! is broken in docker
ps -Af | grep $1 \
| tr -s " " "|" | cut -d "|" -f 2 | head -n -1 \
| xargs
}
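# Usage sketch (illustrative): `get_pids redis` lists every "ps -Af" entry
# matching "redis", drops the last line (intended to exclude the grep itself),
# and prints the remaining PIDs on one line for the *.pid files written below.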
function launch_redis {
PIDFILE="/opt/redis.pid"
LOGFILE="/opt/redis.log"
kill $(cat ${PIDFILE})
# as recommended in the warning shown when redis is launched otherwise
echo never > /sys/kernel/mm/transparent_hugepage/enabled
(redis-server > ${LOGFILE} 2>&1 & )
sleep 4s
PIDS=$(get_pids redis)
echo -n $PIDS > $PIDFILE
echo "Launched redis: $PIDS"
cd /opt
}
function launch_mongo {
PIDFILE="/opt/mongo.pid"
LOGFILE="/opt/mongo.log"
kill $(cat ${PIDFILE})
mkdir /opt/mongoDB
(mongod --dbpath /opt/mongoDB > ${LOGFILE} 2>&1 & )
sleep 4s
PIDS=$(get_pids mongod)
echo -n $PIDS > $PIDFILE
echo "Launched mongo: $PIDS"
cd /opt
}
function launch_el {
/etc/init.d/elasticsearch restart
echo "Launched ElasticSearch (via init.d)"
}
function launch_kafka {
PIDFILE="/opt/kafka.pid"
LOGFILE="/opt/kafka.log"
kill $(cat ${PIDFILE})
cd /opt/${KAFKA_VERSION}
(bin/kafka-server-start.sh config/server.properties > ${LOGFILE} 2>&1 & )
sleep 4s
PIDS=$(get_pids kafka_2)
echo -n $PIDS > $PIDFILE
echo "Launched kafka: $PIDS"
cd /opt
}
function launch_zookeeper {
PIDFILE="/opt/zookeeper.pid"
LOGFILE="/opt/zookeeper.log"
kill $(cat ${PIDFILE})
cd /opt/${ZOOKEEPER_VERSION}/bin
(./zkServer.sh start > ${LOGFILE} 2>&1 & )
sleep 4s
PIDS=$(get_pids zookeeper)
echo -n $PIDS > $PIDFILE
echo "Launched zookeeper: $PIDS"
cd /opt
}
function launch_storm {
PIDFILE="/opt/storm.pid"
kill $(cat ${PIDFILE})
LOGFILE="/opt/storm_nimbus.log"
(storm nimbus > ${LOGFILE} 2>&1 & )
PIDS=$(get_pids nimbus)
echo -n "$PIDS " > $PIDFILE
sleep 2s
LOGFILE="/opt/storm_supervisor.log"
(storm supervisor > ${LOGFILE} 2>&1 & )
PIDS=$(get_pids supervisor)
echo -n "$PIDS " >> $PIDFILE
sleep 2s
LOGFILE="/opt/storm_ui.log"
(storm ui > ${LOGFILE} 2>&1 & )
PIDS=$(get_pids .ui)
echo -n "$PIDS " >> $PIDFILE
sleep 2s
echo "Launched storm: $PIDS"
cd /opt
}
function launch_openlrs {
PIDFILE="/opt/openlrs.pid"
LOGFILE="/opt/openlrs.log"
kill $(cat ${PIDFILE})
cd /opt/OpenLRS
chmod 0755 run.sh
echo "Warning - this takes a long time to start (~1m)"
(./run.sh > ${LOGFILE} 2>&1 & )
sleep 4s
PIDS=$(get_pids openlrs)
echo -n $PIDS > $PIDFILE
echo "Launched OpenLRS: $PIDS"
cd /opt
}
function launch_node {
PIDFILE="/opt/$1.pid"
LOGFILE="/opt/$1.log"
kill $(cat ${PIDFILE})
(./$1.sh > ${LOGFILE} 2>&1 & )
sleep 4s
PIDS=$(get_pids $1.sh)
echo -n $PIDS > $PIDFILE
echo "Launched $1 via Node: $PIDS"
cd /opt
}
function launch_test_users {
launch_node test-users
}
function launch_lrs {
launch_node lrs
}
function launch_gf {
launch_node gf
}
function launch_emo {
launch_node emoB
launch_node emoF
}
# WARNING - this is for reference; do not execute directly
# as services take a while to start, and some require others
# to be running to start properly
function launch_all {
launch_zookeeper #
launch_redis
launch_mongo # 27017
launch_el
launch_storm # 8081 + internal
launch_kafka
launch_openlrs # 8080
launch_test_users # 3000 ; also :3000/api
launch_lrs # 3300 ;
launch_gf # 3350
launch_emo # 3111 (frontend); 3232 (be)
}
function log {
tail -n 100 -f $1
}
| manuel-freire/wp2-demo | go.sh | Shell | apache-2.0 | 10,296 |
#!/bin/sh
if ifconfig | grep -q vpp-dhcpd; then ip link del vpp-dhcpd; fi; \
if ip netns list | grep -q dhcpd; then ip netns del dhcpd; fi;
ip link add name vpp-dhcpd type veth peer name dhcpd-vpp
ip netns add dhcpd
ip link set dhcpd-vpp netns dhcpd
# ip netns exec dhcpd vconfig add dhcpd-vpp 10
# ip netns exec dhcpd vconfig add dhcpd-vpp 20
# ip netns exec dhcpd vconfig set_flag dhcpd-vpp.10 0 0
# ip netns exec dhcpd vconfig set_flag dhcpd-vpp.20 0 0
ip netns exec dhcpd ifconfig dhcpd-vpp 0.0.0.0 up
# ip netns exec dhcpd ifconfig dhcpd-vpp.10 192.168.0.1/24 up
# ip netns exec dhcpd ifconfig dhcpd-vpp.20 192.169.0.1/24 up
# ip netns exec dhcpd dhcpd -4 -f -d --no-pid dhcpd-vpp.10 dhcpd-vpp.20
ip netns exec dhcpd dhcpd -4 -f -d dhcpd-vpp
# vconfig add vpp-dhcpd 10
# vconfig add vpp-dhcpd 20
# ifconfig vpp-dhcpd.10 192.168.0.2/24 up
# ifconfig vpp-dhcpd.20 192.169.0.2/24 up
# ip netns exec dhcpd vconfig set_flag dhcpd-vpp.10 1 0
# ip netns exec dhcpd vconfig set_flag dhcpd-vpp.20 1 1
| halexan/msc | test/dhcp/namespace.sh | Shell | apache-2.0 | 1,002 |
#!/sbin/sh
#
# /system/addon.d/10-mapsapi.sh
#
. /tmp/backuptool.functions
list_files() {
cat <<EOF
etc/permissions/com.google.android.maps.xml
framework/com.google.android.maps.jar
EOF
}
case "$1" in
backup)
list_files | while read FILE DUMMY; do
backup_file $S/$FILE
done
;;
restore)
list_files | while read FILE REPLACEMENT; do
R=""
[ -n "$REPLACEMENT" ] && R="$S/$REPLACEMENT"
[ -f "$C/$S/$FILE" ] && restore_file $S/$FILE $R
done
;;
pre-backup)
# Stub
;;
post-backup)
# Stub
;;
pre-restore)
# Stub
;;
post-restore)
# Stub
;;
esac
| microg/android_frameworks_mapsv1 | mapsv1-flashable/src/main/files/system/addon.d/10-mapsapi.sh | Shell | apache-2.0 | 618 |
#!/bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
URL="https://${SERVICE_NAME?}-dot-${PROJECT?}.appspot.com"
################################################################################
# DAM
################################################################################
function start_dam() {
start_hydra
# SERVICE_NAME allows different instances to use different resources, such as storage.
# It is common to keep this in sync with the YAML "service" name via $TYPE.
export SERVICE_NAME="${SERVICE_NAME?}"
# HYDRA_PUBLIC_URL sets the hydra public url for start login.
# TODO need to update after we deploy hydra on GCP.
export HYDRA_PUBLIC_URL="${URL?}"
# HYDRA_ADMIN_URL sets the hydra admin url for callback.
# TODO need to update after we deploy hydra on GCP.
export HYDRA_ADMIN_URL="http://127.0.0.1:4445"
export HYDRA_PUBLIC_URL_INTERNAL="http://127.0.0.1:4444"
export USE_HYDRA="true"
# CONFIG_PATH is the path used for reading and writing config files.
export CONFIG_PATH="deploy/config"
# STORAGE is one of: "memory", "datastore".
export STORAGE="datastore"
# FEDERATED_ACCESS_ENABLE_EXPERIMENTAL turns on experimental features if set to 'true'.
# Not for use with production systems.
export FEDERATED_ACCESS_ENABLE_EXPERIMENTAL="${EXPERIMENTAL?}"
export DAM_PORT="8000"
# DAM_URL is the expected service URL in GA4GH passports targeted at this service.
export DAM_URL="${URL?}"
# DEFAULT_BROKER is the default identity broker.
export DEFAULT_BROKER="default_ic"
echo Starting DAM
cd /hcls-fa
./dam -alsologtostderr &
echo Started DAM
start_nginx
}
################################################################################
# Hydra
################################################################################
function start_hydra() {
echo Starting HYDRA
# Use jwt access token
export OAUTH2_ACCESS_TOKEN_STRATEGY="jwt"
# Encryption support in database
# TODO: should read from cloud store
export SECRETS_SYSTEM="123456789012345657890"
# CORS for public
export SERVE_PUBLIC_CORS_ENABLED="true"
export SERVE_PUBLIC_CORS_ALLOWED_ORIGINS="*"
# issuer URL
export URLS_SELF_ISSUER="${URL?}"
# Login and consent app
export URLS_CONSENT="${URL?}/dam/consent"
export URLS_LOGIN="${URL?}/dam/login"
# Database connect
export DSN="postgres://hydra:[email protected]:1234/${SERVICE_NAME?}?sslmode=disable"
# Setup database for hydra.
cd /hydra
./hydra migrate sql --yes $DSN
# Start hydra
# use --dangerous-force-http because GAE takes care of https.
./hydra serve all --dangerous-force-http &
sleep 10
echo Started HYDRA
}
################################################################################
# Nginx
################################################################################
function start_nginx() {
echo Starting NGINX
cd /
nginx
echo Started NGINX
}
start_dam
# Wait
sleep infinity
| GoogleCloudPlatform/healthcare-federated-access-services | deploy/build-templates/dam/entrypoint.bash | Shell | apache-2.0 | 3,480 |
#!/usr/bin/env bash
FAKE="packages/build/FAKE/tools/FAKE.exe"
BUILDSCRIPT="build/scripts/Targets.fsx"
mono .paket/paket.bootstrapper.exe
if [[ -f .paket.lock ]]; then mono .paket/paket.exe restore; fi
if [[ ! -f .paket.lock ]]; then mono .paket/paket.exe install; fi
mono $FAKE $BUILDSCRIPT "cmdline=$*" --fsiargs -d:MONO
| CSGOpenSource/elasticsearch-net | build.sh | Shell | apache-2.0 | 324 |
#!/bin/bash
set -e
# Use -gt 1 to consume two arguments per pass in the loop (i.e. each argument has a corresponding value to go with it).
# Use -gt 0 to consume one or more arguments per pass in the loop (i.e. some arguments, such as a bare flag,
# don't have a corresponding value to go with them); see the example invocation after the loop.
#
while [[ $# -gt 1 ]]
do
key="$1"
case $key in
-g|--github-repository-name)
GITHUB_REPO_NAME="$2"
shift # past argument
;;
-w|--work-dir)
WORK_DIR="$2"
shift # past argument
;;
-t|--title)
VIDEO_TITLE="$2"
shift # past argument
;;
-vr|--video-resolution)
VIDEO_RESOLUTION="$2"
shift # past argument
;;
-vd|--video-depth)
VIDEO_DEPTH="$2"
shift # past argument
;;
-s|--seconds-per-day)
SEC_PER_DAY="$2"
shift # past argument
;;
-o|--output-video-name)
OUTPUT_VIDEO_NAME="$2"
shift # past argument
;;
*)
# unknown option
;;
esac
shift # past argument or value
done
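# Example invocation (illustrative values; any flag may be omitted and falls
# back to the defaults set below):
#   ./render.sh -g user/repo -t "My Repo History" -s 0.5 -o myrepo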
GITHUB_REPO_NAME="${GITHUB_REPO_NAME:-code-tv/code-tv}"
WORK_DIR="${WORK_DIR:-/work}"
OUTPUT_VIDEO_NAME="${OUTPUT_VIDEO_NAME:-video}"
VIDEO_TITLE="${VIDEO_TITLE:-History of Code}"
VIDEO_RESOLUTION="${VIDEO_RESOLUTION:-960x540}"
VIDEO_DEPTH="${VIDEO_DEPTH:-24}"
SEC_PER_DAY="${SEC_PER_DAY:-1}"
GIT_REPOSITORY_DIR="$WORK_DIR/repository"
echo "Script variables:"
echo GITHUB_REPO_NAME: ${GITHUB_REPO_NAME}
echo WORK_DIR: ${WORK_DIR}
echo OUTPUT_VIDEO_NAME: ${OUTPUT_VIDEO_NAME}
echo GIT_REPOSITORY_DIR: ${GIT_REPOSITORY_DIR}
echo VIDEO_TITLE: ${VIDEO_TITLE}
echo VIDEO_RESOLUTION: ${VIDEO_RESOLUTION}
echo VIDEO_DEPTH: ${VIDEO_DEPTH}
echo SEC_PER_DAY: ${SEC_PER_DAY}
echo "Refreshing $WORK_DIR working directory..."
rm -rf ${WORK_DIR}/*
mkdir -p ${WORK_DIR}
echo "Done."
echo "Cloning $GITHUB_REPO_NAME repository into $GIT_REPOSITORY_DIR directory..."
git clone https://github.com/${GITHUB_REPO_NAME}.git ${GIT_REPOSITORY_DIR}
echo "Done."
echo "Creating $WORK_DIR/$OUTPUT_VIDEO_NAME.ppm file..."
pushd .
cd ${GIT_REPOSITORY_DIR}
screen -dmS "recording" xvfb-run -a -s "-screen 0 ${VIDEO_RESOLUTION}x${VIDEO_DEPTH}" \
gource "-$VIDEO_RESOLUTION" \
-r 30 \
--title "$VIDEO_TITLE" \
--user-image-dir /avatars \
--highlight-all-users \
--seconds-per-day "$SEC_PER_DAY" \
--auto-skip-seconds 1 \
--hide progress,mouse,filenames,dirnames \
--bloom-multiplier 1.25 \
--bloom-intensity 0.9 \
--time-scale 1.0 \
--stop-at-end \
-o "$WORK_DIR/$OUTPUT_VIDEO_NAME.ppm"
popd
echo "Done."
previousSize="0"
currentSize="0"
while [[ ${currentSize} -eq "0" || ${previousSize} -lt ${currentSize} ]] ;
do
sleep 2
previousSize=${currentSize}
currentSize=$(stat -c '%s' ${WORK_DIR}/${OUTPUT_VIDEO_NAME}.ppm)
echo "Current $OUTPUT_VIDEO_NAME.ppm size is $currentSize"
done
echo "The $WORK_DIR/$OUTPUT_VIDEO_NAME.ppm size has stopped growing."
# This hack is needed because the gource process doesn't stop;
# MP: it seems that on a Debian-based Node Docker image it's not needed
#echo "Force-stopping the recording session."
#screen -r -S "recording" -X quit 2>/dev/null
echo "Creating $WORK_DIR/$OUTPUT_VIDEO_NAME.mp4 file..."
avconv -y -r 30 -f image2pipe \
-loglevel info \
-vcodec ppm \
-i "$WORK_DIR/$OUTPUT_VIDEO_NAME.ppm" \
-vcodec libx264 \
-preset medium \
-pix_fmt yuv420p \
-crf 1 \
-threads 0 \
-bf 0 \
"$WORK_DIR/$OUTPUT_VIDEO_NAME.mp4"
echo "Done."
echo "Removing the temporary $WORK_DIR/$OUTPUT_VIDEO_NAME.ppm file."
rm -f "$WORK_DIR/$OUTPUT_VIDEO_NAME.ppm"
echo "Done."
echo "Removing $GIT_REPOSITORY_DIR directory..."
rm -rf ${GIT_REPOSITORY_DIR}
echo "Done."
| code-tv/code-tv | agent/scripts/render.sh | Shell | apache-2.0 | 3,703 |
#!/bin/bash
# Removing leftover leases and persistent rules
echo "cleaning up dhcp leases"
rm /var/lib/dhcp/*
# Make sure Udev doesn't block our network
echo "cleaning up udev rules"
rm /etc/udev/rules.d/70-persistent-net.rules
# creating a directory with the same name keeps udev from recreating the rules file
mkdir /etc/udev/rules.d/70-persistent-net.rules
rm -rf /dev/.udev/
rm /lib/udev/rules.d/75-persistent-net-generator.rules
echo "Adding a 2 sec delay to the interface up, to make the dhclient happy"
echo "pre-up sleep 2" >> /etc/network/interfaces
# Zero out the free space to save space in the final image:
dd if=/dev/zero of=/EMPTY bs=1M
rm -f /EMPTY
# Sync to ensure that the delete completes before this moves on.
sync
sync
sync
| oakensoul/apv-ansible-citadel | scripts/cleanup.sh | Shell | apache-2.0 | 665 |
#!/bin/bash
export PYTHONPATH='../../'
nohup /home/bgshin/.virtualenvs/mxnet/bin/python -u /home/bgshin/works/mxnet_cnn/src/shm/w2v_shm_loader.py > mkshm.txt &
while [ ! -f /dev/shm/s17_y_tst_400 ]
do
sleep 2
done
ls /dev/shm/
echo 'done'
| bgshin/mxnet_cnn | src/shm/mkshm.sh | Shell | apache-2.0 | 243 |
#! /bin/bash
set -e
source omsagent.version
usage()
{
local basename=`basename $0`
echo "usage: ./$basename <path to omsagent-<version>.universal.x64{.sh, .sha256sums, .asc}> [path for zip output]"
}
input_path=$1
output_path=$2
PACKAGE_NAME="oms$OMS_EXTENSION_VERSION.zip"
if [[ "$1" == "--help" ]]; then
usage
exit 0
elif [[ ! -d $input_path ]]; then
echo "OMS files path '$input_path' not found"
usage
exit 1
fi
if [[ "$output_path" == "" ]]; then
output_path="../"
fi
# Packaging starts here
cp -r ../Utils .
cp ../Common/WALinuxAgent-2.0.16/waagent .
# cleanup packages, ext
rm -rf packages ext/future
mkdir -p packages ext/future
# copy shell bundle to packages/
cp $input_path/omsagent-$OMS_SHELL_BUNDLE_VERSION.universal.x64.* packages/
# copy just the source of python-future
cp -r ext/python-future/src/* ext/future
# sync the file copy
sync
if [[ -f $output_path/$PACKAGE_NAME ]]; then
echo "Removing existing $PACKAGE_NAME ..."
rm -f $output_path/$PACKAGE_NAME
fi
echo "Packaging extension $PACKAGE_NAME to $output_path"
excluded_files="omsagent.version packaging.sh apply_version.sh update_version.sh"
zip -r $output_path/$PACKAGE_NAME * -x $excluded_files "./test/*" "./extension-test/*" "./references" "./ext/python-future/*"
# cleanup newly added dir or files
rm -rf Utils/ waagent
| vityagi/azure-linux-extensions | OmsAgent/packaging.sh | Shell | apache-2.0 | 1,346 |
#!/usr/bin/env bash
#
# Deploy to Azure, which requires some shell magic to make things happy
#
# TODO: How do we check to see if the azure xplat tools are installed?
# Are the azure tools installed
# if [ `azure -v` ]
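# A minimal sketch of such a check (assumption: the xplat tools expose an
# `azure` command on the PATH):
#   if ! command -v azure >/dev/null 2>&1; then
#     echo "azure xplat CLI not found; install it before deploying" >&2
#     exit 1
#   fi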
# Get the machine name from the command line or prompt for it
if [ "x$1" = "x" ]; then
echo "Please enter a name for your virtual machine: "
read MACHINE_NAME
export NITROGEN_VM_NAME="$MACHINE_NAME"
else
export NITROGEN_VM_NAME="$1"
fi
# Subscription
echo -n "Retrieving subscription..."
export AZURE_SUBSCRIPTION_ID="`azure account show | grep ID | awk '{ print $3 }'`"
echo "done ($AZURE_SUBSCRIPTION_ID)."
# Download management certificate
echo -n "Retrieving management certificate..."
export AZURE_MANAGEMENT_CERT="`azure account cert export | grep 'exported to' | awk '{ print $5 }'`"
echo "done. ($AZURE_MANAGEMENT_CERT)"
# Make SSH Key Pair
echo -n "Making ssh key pair for login..."
openssl req -batch -x509 -nodes -days 365 -newkey rsa:2048 -keyout NitrogenPrivateKey.key -out NitrogenCert.pem >& /dev/null 2>&1
chmod 600 NitrogenPrivateKey.key
export NITROGEN_CERT="NitrogenCert.pem"
export NITROGEN_KEY="NitrogenPrivateKey.key"
echo "done."
# Save settings
echo "export NITROGEN_VM_NAME=\"$NITROGEN_VM_NAME\"" > bash.settings
echo "export AZURE_SUBSCRIPTION_ID=\"$AZURE_SUBSCRIPTION_ID\"" >> bash.settings
echo "export AZURE_MANAGEMENT_CERT=\"$AZURE_MANAGEMENT_CERT\"" >> bash.settings
echo "export NITROGEN_KEY=\"$NITROGEN_KEY\"" >> bash.settings
echo "export NITROGEN_CERT=\"$NITROGEN_CERT\"" >> bash.settings
# Build the vm
echo "Provisioning..."
vagrant up --provider azure
echo -n "done."
# Inform the user
echo "To use the azure created vm, run . bash.settings before you issue any vagrant commands."
| dhruvplaytabase/vagrant-vms-reemo | nitrogen/deploy-azure.sh | Shell | apache-2.0 | 1,750 |
#!/bin/bash
export ILIB_HOME=../../..
DEST=../locale
rm localize.js
touch localize.js
# add IT, TW, and CH when they are ready
for locale in AU BE CN DE ES FR GB HK IE IN LU MX NL NZ SG US XX KR
do
echo Generating $locale ...
mkdir -p $DEST/und/$locale
../../../bin/tablemaker ${locale}.txt ${locale}.json
mv ${locale}.json $DEST/und/$locale/states.json
mv ${locale}.area.json $DEST/und/$locale/area.json
echo "// Strings for ${locale}" >> localize.js
cat ${locale}.strings.js >> localize.js
echo "" >> localize.js
rm ${locale}.strings.js
done
echo Generating the idd info
../../../bin/tablemaker -t idd.txt $DEST/idd.json
echo Generating the mnc info
../../../bin/tablemaker mnc.txt $DEST/mnc.json
rm mnc.strings.js
for locale in AU FR NZ
do
echo Generating Extended area codes for $locale ...
mkdir -p $DEST/und/$locale
../../../bin/tablemaker ${locale}.geo.txt extstates.json
mv extstates.json $DEST/und/$locale/extstates.json
mv ${locale}.geo.area.json $DEST/und/$locale/extarea.json
echo "// Strings for ${locale}.geo" >> localize.js
cat ${locale}.geo.strings.js >> localize.js
echo "" >> localize.js
rm ${locale}.geo.strings.js
done
echo Running novaloc. This may take a few seconds...
novaloc ilib-phoneres .
echo Cleaning up resources...
cd resources
rm -rf eu ps zu ilibmanifest.json
for file in $(find . -name strings.json)
do
base=$(dirname $file)
mv $base/strings.json $base/phoneres.json
done
mv es/ES/phoneres.json es
mv zh/Hant/HK/phoneres.json zh/Hant
echo Done. Resources are in ./resources
| iLib-js/iLib | js/data/phone/genphone.sh | Shell | apache-2.0 | 1,530 |
#!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'
# Skip API tests
export REPOFS_SKIP_API_TEST=true
# mocha --debug-brk
mocha --reporter spec --compilers js:babel-register --bail --timeout 15000
| GitbookIO/repofs | scripts/test-no-api.sh | Shell | apache-2.0 | 197 |
#!/bin/bash
DELAY=3
while [ 1 ]; do
IFS=$'\n'
CPU_TEMP=`cat /sys/class/thermal/thermal_zone1/temp`
/opt/beerbox/bin/OLED.py --parameter CPU_TEMP --value $CPU_TEMP
sleep $DELAY
sensor=0
while [ $sensor -le 1 ]
do
temp=`digitemp_DS9097U -t $sensor -q -r2000 -c /opt/beerbox/etc/digitemp.conf`
case $sensor in
0)
/opt/beerbox/bin/OLED.py --parameter TEMP_IN --value $temp
sleep $DELAY
;;
1)
/opt/beerbox/bin/OLED.py --parameter TEMP_OUT --value $temp
sleep $DELAY
;;
esac
echo $temp > /tmp/sensors/$sensor
sensor=$(( $sensor + 1 ))
done
done
| RipZ/beerbox | scripts/temp_to_OLED.sh | Shell | apache-2.0 | 609 |
#!/bin/bash
set -e
this_local_dir=$(cd `dirname $0` && pwd)
if [ -z "$this_local_dir" ] || [ ! -d "$this_local_dir" ] ; then echo "Failed $0"; exit 1; fi
vocab_full=$1; shift
transcription=$1; shift
perl $this_local_dir/phonetic_transcription_dummy.pl $vocab_full $transcription
| UFAL-DSG/kams | kams/local/prepare_dummy_transcription.sh | Shell | apache-2.0 | 282 |
#!/bin/bash
PASS=${MYSQL_PASS:-$(pwgen -s 12 1)}
_word=$( [ -n "${MYSQL_PASS}" ] && echo "preset" || echo "random" )
echo "=> Creating MySQL admin user with ${_word} password"
mysql -uroot -e "CREATE USER 'admin'@'%' IDENTIFIED BY '$PASS'"
mysql -uroot -e "GRANT ALL PRIVILEGES ON *.* TO 'admin'@'%' WITH GRANT OPTION"
echo "========================================================================"
echo "You can now connect to this MySQL Server using:"
echo ""
echo " mysql -uadmin -p$PASS -h<host> -P<port>"
echo ""
echo "Please remember to change the above password as soon as possible!"
echo "MySQL user 'root' has no password but only allows local connections"
echo "========================================================================"
| freistil/freistil-docker-lamp | mysql/init/20-admin.sh | Shell | apache-2.0 | 747 |
#!/bin/bash
CERT=${1-}
PKEY=${2-}
if [[ -z $CERT || -z $PKEY ]] ; then
echo "Usage: ${0} CERT PKEY"
echo ""
exit 1;
fi
openssl x509 -modulus -in $CERT
openssl rsa -modulus -in $PKEY
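# The two "Modulus=" lines above should be identical for a matching pair.
# A compact way to verify (sketch; relies on bash process substitution):
#   diff <(openssl x509 -noout -modulus -in "$CERT") \
#        <(openssl rsa -noout -modulus -in "$PKEY") \
#     && echo "certificate and key moduli match"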
#####
# EOF
| bewest/homeware | homeware-ec2-dev/check.sh | Shell | apache-2.0 | 205 |
#!/bin/sh
mpirun -np 4 -machinefile /home/eolson/am-macs/machinefile1234 /home/eolson/am-macs/local/bin/pyMPI tileImage.py -c am8screens $*
| rpwagner/tiled-display | flTile/am8screens.sh | Shell | apache-2.0 | 140 |
#!/bin/bash
echo "mvn deploy -V -B -DskipTests"
mvn deploy -V -B -DskipTests
| javalite/activejdbc | .travisci/deploy.sh | Shell | apache-2.0 | 77 |
#!/bin/bash
set -e
trap 'echo >&2 Ctrl+C captured, exiting; exit 1' SIGINT
image="$1"; shift
docker build --pull -t repo-info:local-apk -q -f Dockerfile.local-apk . > /dev/null
docker build --pull -t repo-info:local-dpkg -q -f Dockerfile.local-dpkg . > /dev/null
docker build --pull -t repo-info:local-rpm -q -f Dockerfile.local-rpm . > /dev/null
name="repo-info-local-$$-$RANDOM"
trap "docker rm -vf '$name-data' > /dev/null || :" EXIT
docker create \
--name "$name-data" \
-v /etc \
-v /lib/apk \
-v /usr/lib/rpm \
-v /usr/share/apk \
-v /usr/share/doc \
-v /var/lib \
"$image" \
bogus > /dev/null
echo '# `'"$image"'`'
size="$(
docker inspect -f '{{ .VirtualSize }}' "$image" | awk '{
oneKb = 1000;
oneMb = 1000 * oneKb;
oneGb = 1000 * oneMb;
if ($1 >= oneGb) {
printf "~ %.2f Gb", $1 / oneGb
} else if ($1 >= oneMb) {
printf "~ %.2f Mb", $1 / oneMb
} else if ($1 >= oneKb) {
printf "~ %.2f Kb", $1 / oneKb
} else {
printf "%d bytes", $1
}
}'
)"
docker inspect -f '
## Docker Metadata
- Image ID: `{{ .Id }}`
- Created: `{{ .Created }}`
- Virtual Size: '"$size"'
(total size of all layers on-disk)
- Arch: `{{ .Os }}`/`{{ .Architecture }}`
{{ if .Config.Entrypoint }}- Entrypoint: `{{ json .Config.Entrypoint }}`
{{ end }}{{ if .Config.Cmd }}- Command: `{{ json .Config.Cmd }}`
{{ end }}- Environment:{{ range .Config.Env }}{{ "\n" }} - `{{ . }}`{{ end }}{{ if .Config.Labels }}
- Labels:{{ range $k, $v := .Config.Labels }}{{ "\n" }} - `{{ $k }}={{ $v }}`{{ end }}{{ end }}' "$image"
docker run --rm --volumes-from "$name-data" -v /etc/ssl repo-info:local-apk || :
docker run --rm --volumes-from "$name-data" -v /etc/ssl repo-info:local-dpkg || :
docker run --rm --volumes-from "$name-data" -v /etc/ssl repo-info:local-rpm || :
| docker-library/repo-info | scan-local.sh | Shell | apache-2.0 | 1,787 |
#!/bin/bash
name_pattern="TA_response*"
find . -maxdepth 3 -mindepth 1 -type f -name "$name_pattern" | while read -r file
do
./sampleRead_exe "$file"
done
| seanpquinn/augerta | reco/batch_adelaide_bin_to_text.sh | Shell | apache-2.0 | 156 |
#!/bin/bash
echo "access with browser http://localhost:8080/kjwikigdocker/"
set -xe
docker run -p 8080:8080 --rm --name kjwikigdocker -e KJWikiG_defaultWord=StartPage georgesan/kjwikigdocker:latest
docker rm -f kjwikigdocker
set +xe
| george-pon/kjwikigdocker | test-run-for-docker.sh | Shell | apache-2.0 | 235 |
#! /usr/bin/env bash
# Cleaning directories
echo "Initiating Glyphs Scripts setup"
if [ -f "/private/etc/cron.d/sync_git_repos" ] ; then
sudo rm /private/etc/cron.d/sync_git_repos
fi
if [ ! -d "/etc/cron.d" ] ; then
sudo mkdir /etc/cron.d
fi
cd /etc/cron.d
if [ -e "/tmp/GlyphsScriptsConfi" ] ; then
rm -r /tmp/GlyphsScriptsConfi
fi
mkdir /tmp/GlyphsScriptsConfi
cd ~/Documents
if [ -d "GlyphsScripts" ] ; then
rm -rf GlyphsScripts
fi
mkdir GlyphsScripts
# Unlinking symbolic links, even the legacy ones
cd ~/Library/Application\ Support/FontLab/Studio\ 5/Macros/
if [ -h "Glyphs Export.py" ] ; then
unlink "Glyphs Export.py"
fi
if [ -h "Glyphs Import.py" ] ; then
unlink "Glyphs Import.py"
fi
cd ~/Library/Application\ Support/Glyphs/
if [ -d "Scripts" ] ; then
cd Scripts/
if [ -h "BubbleKern/BubbleKern.py" ] ; then
unlink "BubbleKern/BubbleKern.py"
fi
if [ -h "BubbleKern/Delete Bubble Layers.py" ] ; then
unlink "BubbleKern/Delete Bubble Layers.py"
fi
if [ -h "BubbleKern/Make Bubble Layers.py" ] ; then
unlink "BubbleKern/Make Bubble Layers.py"
fi
if [ -d "BubbleKern" ] ; then
rm -r "BubbleKern"
fi
if [ -h "Deutschmark/Accents" ] ; then
unlink "Deutschmark/Accents"
fi
if [ -h "Deutschmark/Font" ] ; then
unlink "Deutschmark/Font"
fi
if [ -h "Deutschmark/Glyphs" ] ; then
unlink "Deutschmark/Glyphs"
fi
if [ -h "Deutschmark/Metrics" ] ; then
unlink "Deutschmark/Metrics"
fi
if [ -h "Deutschmark/Sketching" ] ; then
unlink "Deutschmark/Sketching"
fi
if [ -d "Deutschmark" ] ; then
rm -r "Deutschmark"
fi
if [ -h "GSPen.py" ] ; then
unlink "GSPen.py"
fi
if [ -f "GSPen.pyc" ] ; then
rm "GSPen.pyc"
fi
if [ -h "HuertaTipografica" ] ; then
unlink "HuertaTipografica"
fi
if [ -h "justanotherfoundry" ] ; then
unlink "justanotherfoundry"
fi
if [ -h "mekkablue" ] ; then
unlink "mekkablue"
fi
if [ -h "objectsGS.py" ] ; then
unlink "objectsGS.py"
fi
if [ -f "objectsGS.pyc" ] ; then
rm "objectsGS.pyc"
fi
if [ -h "schriftgestalt/Autopsy.py" ] ; then
unlink "schriftgestalt/Autopsy.py"
fi
if [ -h "schriftgestalt/Delete Images.py" ] ; then
unlink "schriftgestalt/Delete Images.py"
fi
if [ -h "schriftgestalt/Helper" ] ; then
unlink "schriftgestalt/Helper"
fi
if [ -h "schriftgestalt/Import SVG.py" ] ; then
unlink "schriftgestalt/Import SVG.py"
fi
if [ -h "schriftgestalt/Import SVGs2Glyphs.py" ] ; then
unlink "schriftgestalt/Import SVGs2Glyphs.py"
fi
if [ -h "schriftgestalt/Make Unicase Font.py" ] ; then
unlink "schriftgestalt/Make Unicase Font.py"
fi
if [ -h "schriftgestalt/MakeProdunctionFont.py" ] ; then
unlink "schriftgestalt/MakeProdunctionFont.py"
fi
if [ -h "schriftgestalt/Metrics & Classes" ] ; then
unlink "schriftgestalt/Metrics & Classes"
fi
if [ -h "schriftgestalt/Other Scripts" ] ; then
unlink "schriftgestalt/Other Scripts"
fi
if [ -d "schriftgestalt" ] ; then
rm -r "schriftgestalt"
fi
if [ -h "SimonCozens" ] ; then
unlink "SimonCozens"
fi
if [ -h "StringSmash" ] ; then
unlink "StringSmash"
fi
if [ -h "Tosche" ] ; then
unlink "Tosche"
fi
if [ -h "Wei" ] ; then
unlink "Wei"
fi
if [ -h "Nevu" ] ; then
unlink "Nevu"
fi
if [ -h "GuidoFerreyra" ] ; then
unlink "GuidoFerreyra"
fi
if [ -h "ohBendy" ] ; then
unlink "ohBendy"
fi
fi
cd ~/Library/Application\ Support/Glyphs/
if [ -d "Plugins" ] ; then
cd Plugins/
if [ -h "Autopsy.glyphsPlugin" ] ; then
unlink "Autopsy.glyphsPlugin"
fi
if [ -h "BroadNibber.glyphsFilter" ] ; then
unlink "BroadNibber.glyphsFilter"
fi
if [ -h "CurveEQ.glyphsFilter" ] ; then
unlink "CurveEQ.glyphsFilter"
fi
if [ -h "CutAndShake.glyphsFilter" ] ; then
unlink "CutAndShake.glyphsFilter"
fi
if [ -h "DrawBot.glyphsPlugin" ] ; then
unlink "DrawBot.glyphsPlugin"
fi
if [ -h "FixZeroHandles.glyphsFilter" ] ; then
unlink "FixZeroHandles.glyphsFilter"
fi
if [ -h "FontNote.glyphsPalette" ] ; then
unlink "FontNote.glyphsPalette"
fi
if [ -h "GlyphNote.glyphsPalette" ] ; then
unlink "GlyphNote.glyphsPalette"
fi
if [ -h "GlyphsExpandPathsPreviewTool.glyphsReporter" ] ; then
unlink "GlyphsExpandPathsPreviewTool.glyphsReporter"
fi
if [ -h "GlyphsGit.glyphsPlugin" ] ; then
unlink "GlyphsGit.glyphsPlugin"
fi
if [ -h "GlyphsInLabelColor.glyphsReporter" ] ; then
unlink "GlyphsInLabelColor.glyphsReporter"
fi
if [ -h "InsertInflections.glyphsFilter" ] ; then
unlink "InsertInflections.glyphsFilter"
fi
if [ -h "Inverter.glyphsFilter" ] ; then
unlink "Inverter.glyphsFilter"
fi
if [ -h "LabelColor.glyphsReporter" ] ; then
unlink "LabelColor.glyphsReporter"
fi
if [ -h "LayerGeek.glyphsFilter" ] ; then
unlink "LayerGeek.glyphsFilter"
fi
if [ -h "MakeCorner.glyphsFilter" ] ; then
unlink "MakeCorner.glyphsFilter"
fi
if [ -h "Noodler.glyphsFilter" ] ; then
unlink "Noodler.glyphsFilter"
fi
if [ -h "OffsetPreview.glyphsReporter" ] ; then
unlink "OffsetPreview.glyphsReporter"
fi
if [ -h "RedArrow.glyphsReporter" ] ; then
unlink "RedArrow.glyphsReporter"
fi
if [ -h "ShowAngledHandles.glyphsReporter" ] ; then
unlink "ShowAngledHandles.glyphsReporter"
fi
if [ -h "ShowBlackFill.glyphsReporter" ] ; then
unlink "ShowBlackFill.glyphsReporter"
fi
if [ -h "ShowComponentOrder.glyphsReporter" ] ; then
unlink "ShowComponentOrder.glyphsReporter"
fi
if [ -h "ShowCoordinatesOfSelectedNodes.glyphsReporter" ] ; then
unlink "ShowCoordinatesOfSelectedNodes.glyphsReporter"
fi
if [ -h "ShowCrosshair.glyphsReporter" ] ; then
unlink "ShowCrosshair.glyphsReporter"
fi
if [ -h "ShowDistanceAndAngleOfNodes.glyphsReporter" ] ; then
unlink "ShowDistanceAndAngleOfNodes.glyphsReporter"
fi
if [ -h "ShowExportStatus.glyphsReporter" ] ; then
unlink "ShowExportStatus.glyphsReporter"
fi
if [ -h "ShowFilledPreview.glyphsReporter" ] ; then
unlink "ShowFilledPreview.glyphsReporter"
fi
if [ -h "ShowHandlesEverywhere.glyphsReporter" ] ; then
unlink "ShowHandlesEverywhere.glyphsReporter"
fi
if [ -h "ShowInterpolation.glyphsReporter" ] ; then
unlink "ShowInterpolation.glyphsReporter"
fi
if [ -h "ShowKernBubbles.glyphsReporter" ] ; then
unlink "ShowKernBubbles.glyphsReporter"
fi
if [ -h "ShowKerningGroupReference.glyphsReporter" ] ; then
unlink "ShowKerningGroupReference.glyphsReporter"
fi
if [ -h "ShowMetricsKeys.glyphsReporter" ] ; then
unlink "ShowMetricsKeys.glyphsReporter"
fi
if [ -h "ShowNextMaster.glyphsReporter" ] ; then
unlink "ShowNextMaster.glyphsReporter"
fi
if [ -h "ShowNodeCount.glyphsReporter" ] ; then
unlink "ShowNodeCount.glyphsReporter"
fi
if [ -h "ShowPathArea.glyphsReporter" ] ; then
unlink "ShowPathArea.glyphsReporter"
fi
if [ -h "ShowRotated.glyphsReporter" ] ; then
unlink "ShowRotated.glyphsReporter"
fi
if [ -h "ShowSiblings.glyphsReporter" ] ; then
unlink "ShowSiblings.glyphsReporter"
fi
if [ -h "SmartPlumblines.glyphsReporter" ] ; then
unlink "SmartPlumblines.glyphsReporter"
fi
if [ -h "Symmetry.glyphsReporter" ] ; then
unlink "Symmetry.glyphsReporter"
fi
if [ -h "uncoverXHeight.glyphsReporter" ] ; then
unlink "uncoverXHeight.glyphsReporter"
fi
if [ -h "word-o-mat.glyphsPlugin" ] ; then
unlink "word-o-mat.glyphsPlugin"
fi
if [ -h "showNextFont.glyphsReporter" ] ; then
unlink "showNextFont.glyphsReporter"
fi
if [ -h "showNextFontAnchors.glyphsReporter" ] ; then
unlink "showNextFontAnchors.glyphsReporter"
fi
if [ -h "showCoordinates.glyphsReporter" ] ; then
unlink "showCoordinates.glyphsReporter"
fi
if [ -h "showItalic.glyphsReporter" ] ; then
unlink "showItalic.glyphsReporter"
fi
if [ -h "ShowTopsAndBottoms.glyphsReporter" ] ; then
unlink "ShowTopsAndBottoms.glyphsReporter"
fi
if [ -h "GlobalGlyph.glyphsReporter" ] ; then
unlink "GlobalGlyph.glyphsReporter"
fi
if [ -h "ShowDistanceBetweenTwoPoints.glyphsReporter" ] ; then
unlink "ShowDistanceBetweenTwoPoints.glyphsReporter"
fi
if [ -h "ShowFlippedComponents.glyphsReporter" ] ; then
unlink "ShowFlippedComponents.glyphsReporter"
fi
if [ -h "ShowMasterName&Glyph.glyphsReporter" ] ; then
unlink "ShowMasterName&Glyph.glyphsReporter"
fi
fi
cd ~/Library/Application\ Support/Glyphs/
if [ ! -d "$Scripts" ] ; then
mkdir Scripts
fi
# Making Plugin folder if there isn't
cd ~/Library/Application\ Support/Glyphs/
if [ ! -d "$Plugins" ] ; then
mkdir Plugins
fi
cd ~/Documents
cd GlyphsScripts
# Cloning Repositories
echo 'Cloning Repositories'
git clone https://github.com/BelaFrank/StringSmash.git BelaFrank_StringSmash
cd BelaFrank_StringSmash
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Scripts/
ln -s ~/Documents/GlyphsScripts/BelaFrank_StringSmash/Glyphs/StringSmash StringSmash
cd ~/Documents/GlyphsScripts/
echo '==================================='
echo 'Done BelaFrank String Smash Scripts'
echo '==================================='
git clone https://github.com/DeutschMark/Show-Smart-Plumblines.git DeutschMark_Show-Smart-Plumblines
cd DeutschMark_Show-Smart-Plumblines
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/DeutschMark_Show-Smart-Plumblines/SmartPlumblines.glyphsReporter SmartPlumblines.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '======================================'
echo 'Done DeutschMark Show Smart Plumblines'
echo '======================================'
git clone https://github.com/DeutschMark/Uncover-xHeight.git DeutschMark_Uncover-xHeight
cd DeutschMark_Uncover-xHeight
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/DeutschMark_Uncover-xHeight/uncoverXHeight.glyphsReporter uncoverXHeight.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '================================'
echo 'Done DeutschMark Uncover xHeight'
echo '================================'
git clone https://github.com/DeutschMark/Show-Siblings.git DeutschMark_Show-Siblings
cd DeutschMark_Show-Siblings
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/DeutschMark_Show-Siblings/ShowSiblings.glyphsReporter ShowSiblings.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '=============================='
echo 'Done DeutschMark Show Siblings'
echo '=============================='
git clone https://github.com/DeutschMark/Show-Distance-And-Angle-Of-Nodes.git DeutschMark_Show-Distance-And-Angle-Of-Nodes
cd DeutschMark_Show-Distance-And-Angle-Of-Nodes
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/DeutschMark_Show-Distance-And-Angle-Of-Nodes/ShowDistanceAndAngleOfNodes.glyphsReporter ShowDistanceAndAngleOfNodes.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '================================================='
echo 'Done DeutschMark Show Distance And Angle Of Nodes'
echo '================================================='
git clone https://github.com/DeutschMark/Show-Next-Master.git DeutschMark_Show-Next-Master
cd DeutschMark_Show-Next-Master
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/DeutschMark_Show-Next-Master/ShowNextMaster.glyphsReporter ShowNextMaster.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '================================='
echo 'Done DeutschMark Show Next Master'
echo '================================='
git clone https://github.com/DeutschMark/Show-Kerning-Group-Reference.git DeutschMark_Show-Kerning-Group-Reference
cd DeutschMark_Show-Kerning-Group-Reference
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/DeutschMark_Show-Kerning-Group-Reference/ShowKerningGroupReference.glyphsReporter ShowKerningGroupReference.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '============================================='
echo 'Done DeutschMark Show Kerning Group Reference'
echo '============================================='
git clone https://github.com/DeutschMark/Show-Rotated.git DeutschMark_Show-Rotated
cd DeutschMark_Show-Rotated
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/DeutschMark_Show-Rotated/ShowRotated.glyphsReporter ShowRotated.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '============================='
echo 'Done DeutschMark Show Rotated'
echo '============================='
git clone https://github.com/DeutschMark/Show-Label-Color.git DeutschMark_Show-Label-Color
cd DeutschMark_Show-Label-Color
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/DeutschMark_Show-Label-Color/LabelColor.glyphsReporter LabelColor.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '================================='
echo 'Done DeutschMark Show Label Color'
echo '================================='
git clone https://github.com/DeutschMark/Show-Node-Count.git DeutschMark_Show-Node-Count
cd DeutschMark_Show-Node-Count
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/DeutschMark_Show-Node-Count/ShowNodeCount.glyphsReporter ShowNodeCount.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '================================'
echo 'Done DeutschMark Show Node Count'
echo '================================'
git clone https://github.com/DeutschMark/Glyphsapp-Scripts.git DeutschMark_Glyphsapp-Scripts
cd DeutschMark_Glyphsapp-Scripts
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
if [ -e "~/Library/Application Support/Glyphs/Scripts/Deutschmark" ] ; then
rm -r ~/Library/Application\ Support/Glyphs/Scripts/Deutschmark
fi
mkdir ~/Library/Application\ Support/Glyphs/Scripts/Deutschmark
cd ~/Library/Application\ Support/Glyphs/Scripts/Deutschmark/
ln -s ~/Documents/GlyphsScripts/DeutschMark_Glyphsapp-Scripts/Accents/ Accents
ln -s ~/Documents/GlyphsScripts/DeutschMark_Glyphsapp-Scripts/Font/ Font
ln -s ~/Documents/GlyphsScripts/DeutschMark_Glyphsapp-Scripts/Glyphs/ Glyphs
ln -s ~/Documents/GlyphsScripts/DeutschMark_Glyphsapp-Scripts/Metrics/ Metrics
ln -s ~/Documents/GlyphsScripts/DeutschMark_Glyphsapp-Scripts/Sketching/ Sketching
cd ~/Documents/GlyphsScripts/
echo '=================================='
echo 'Done DeutschMark Glyphsapp Scripts'
echo '=================================='
git clone https://github.com/jenskutilek/Curve-Equalizer.git jenskutilek_Curve-Equalizer
cd jenskutilek_Curve-Equalizer
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/jenskutilek_Curve-Equalizer/Glyphs/CurveEQ.glyphsFilter CurveEQ.glyphsFilter
cd ~/Documents/GlyphsScripts/
echo '======================================='
echo 'Done jenskutilek Curve-Equalizer Plugin'
echo '======================================='
git clone https://github.com/jenskutilek/RedArrow-Glyphs.git jenskutilek_RedArrow-Glyphs
cd jenskutilek_RedArrow-Glyphs
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/jenskutilek_RedArrow-Glyphs/RedArrow.glyphsReporter RedArrow.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '======================================='
echo 'Done jenskutilek RedArrow-Glyphs Plugin'
echo '======================================='
git clone https://github.com/justanotherfoundry/glyphsapp-scripts.git justanotherfoundry_glyphsapp-scripts
cd justanotherfoundry_glyphsapp-scripts
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Scripts/
ln -s ~/Documents/GlyphsScripts/justanotherfoundry_glyphsapp-scripts/ justanotherfoundry
cd ~/Documents/GlyphsScripts/
echo '==============================='
echo 'Done justanotherfoundry Scripts'
echo '==============================='
git clone https://github.com/mekkablue/BroadNibber.git mekkablue_BroadNibber
cd mekkablue_BroadNibber
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_BroadNibber/BroadNibber.glyphsFilter BroadNibber.glyphsFilter
cd ~/Documents/GlyphsScripts/
echo '=================================='
echo 'Done mekkablue Broad Nibber Plugin'
echo '=================================='
git clone https://github.com/mekkablue/CutAndShake.git mekkablue_CutAndShake
cd mekkablue_CutAndShake
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_CutAndShake/CutAndShake.glyphsFilter CutAndShake.glyphsFilter
cd ~/Documents/GlyphsScripts/
echo '==================================='
echo 'Done mekkablue Cut And Shake Plugin'
echo '==================================='
git clone https://github.com/mekkablue/FixZeroHandles.git mekkablue_FixZeroHandles
cd mekkablue_FixZeroHandles
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_FixZeroHandles/FixZeroHandles.glyphsFilter FixZeroHandles.glyphsFilter
cd ~/Documents/GlyphsScripts/
echo '===================================='
echo 'Done mekkablue FixZeroHandles Plugin'
echo '===================================='
git clone https://github.com/mekkablue/Glyphs-Scripts.git mekkablue_Glyphs-Scripts
cd mekkablue_Glyphs-Scripts
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Scripts/
ln -s ~/Documents/GlyphsScripts/mekkablue_Glyphs-Scripts/ mekkablue
cd ~/Documents/GlyphsScripts/
echo '======================'
echo 'Done mekkablue Scripts'
echo '======================'
git clone https://github.com/mekkablue/InsertInflections.git mekkablue_InsertInflections
cd mekkablue_InsertInflections
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_InsertInflections/InsertInflections.glyphsFilter InsertInflections.glyphsFilter
cd ~/Documents/GlyphsScripts/
echo '========================================'
echo 'Done mekkablue Insert Inflections Plugin'
echo '========================================'
git clone https://github.com/mekkablue/Inverter.git mekkablue_Inverter
cd mekkablue_Inverter
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_Inverter/Inverter.glyphsFilter Inverter.glyphsFilter
cd ~/Documents/GlyphsScripts/
echo '=============================='
echo 'Done mekkablue Inverter Plugin'
echo '=============================='
git clone https://github.com/mekkablue/LayerGeek.git mekkablue_LayerGeek
cd mekkablue_LayerGeek
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_LayerGeek/LayerGeek.glyphsFilter LayerGeek.glyphsFilter
cd ~/Documents/GlyphsScripts/
echo '==============================='
echo 'Done mekkablue LayerGeek Plugin'
echo '==============================='
git clone https://github.com/mekkablue/MakeCorner.git mekkablue_MakeCorner
cd mekkablue_MakeCorner
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_MakeCorner/MakeCorner.glyphsFilter MakeCorner.glyphsFilter
cd ~/Documents/GlyphsScripts/
echo '================================'
echo 'Done mekkablue MakeCorner Plugin'
echo '================================'
git clone https://github.com/mekkablue/NotePalettes.git mekkablue_NotePalettes
cd mekkablue_NotePalettes
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_NotePalettes/FontNote.glyphsPalette FontNote.glyphsPalette
ln -s ~/Documents/GlyphsScripts/mekkablue_NotePalettes/GlyphNote.glyphsPalette GlyphNote.glyphsPalette
cd ~/Documents/GlyphsScripts/
echo '==================================='
echo 'Done mekkablue Note Palettes Plugin'
echo '==================================='
git clone https://github.com/mekkablue/Noodler.git mekkablue_Noodler
cd mekkablue_Noodler
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_Noodler/Noodler.glyphsFilter Noodler.glyphsFilter
cd ~/Documents/GlyphsScripts/
echo '============================='
echo 'Done mekkablue Noodler Plugin'
echo '============================='
git clone https://github.com/mekkablue/ShowAngledHandles.git mekkablue_ShowAngledHandles
cd mekkablue_ShowAngledHandles
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_ShowAngledHandles/ShowAngledHandles.glyphsReporter ShowAngledHandles.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '========================================='
echo 'Done mekkablue Show Angled Handles Plugin'
echo '========================================='
git clone https://github.com/mekkablue/ShowComponentOrder.git mekkablue_ShowComponentOrder
cd mekkablue_ShowComponentOrder
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_ShowComponentOrder/ShowComponentOrder.glyphsReporter ShowComponentOrder.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '=========================================='
echo 'Done mekkablue Show Component Order Plugin'
echo '=========================================='
git clone https://github.com/mekkablue/ShowCoordinatesOfSelectedNodes.git mekkablue_ShowCoordinatesOfSelectedNodes
cd mekkablue_ShowCoordinatesOfSelectedNodes
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_ShowCoordinatesOfSelectedNodes/ShowCoordinatesOfSelectedNodes.glyphsReporter ShowCoordinatesOfSelectedNodes.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '========================================================'
echo 'Done mekkablue Show Coordinates Of Selected Nodes Plugin'
echo '========================================================'
git clone https://github.com/mekkablue/ShowCrosshair.git mekkablue_ShowCrosshair
cd mekkablue_ShowCrosshair
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_ShowCrosshair/ShowCrosshair.glyphsReporter ShowCrosshair.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '====================================='
echo 'Done mekkablue Show Cross hair Plugin'
echo '====================================='
git clone https://github.com/mekkablue/ShowFilledPreview.git mekkablue_ShowFilledPreview
cd mekkablue_ShowFilledPreview
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_ShowFilledPreview/ShowFilledPreview.glyphsReporter ShowFilledPreview.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '========================================='
echo 'Done mekkablue Show Filled Preview Plugin'
echo '========================================='
git clone https://github.com/mekkablue/ShowGlyphsInLabelColor.git mekkablue_ShowGlyphsInLabelColor
cd mekkablue_ShowGlyphsInLabelColor
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_ShowGlyphsInLabelColor/GlyphsInLabelColor.glyphsReporter GlyphsInLabelColor.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '================================================'
echo 'Done mekkablue Show Glyphs In Label Color Plugin'
echo '================================================'
git clone https://github.com/mekkablue/ShowHandlesEverywhere.git mekkablue_ShowHandlesEverywhere
cd mekkablue_ShowHandlesEverywhere
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_ShowHandlesEverywhere/ShowHandlesEverywhere.glyphsReporter ShowHandlesEverywhere.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '============================================='
echo 'Done mekkablue Show Handles Everywhere Plugin'
echo '============================================='
git clone https://github.com/mekkablue/ShowInterpolations.git mekkablue_ShowInterpolations
cd mekkablue_ShowInterpolations
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_ShowInterpolations/ShowInterpolation.glyphsReporter ShowInterpolation.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '========================================='
echo 'Done mekkablue Show Interpolations Plugin'
echo '========================================='
git clone https://github.com/mekkablue/ShowMetricsKeys.git mekkablue_ShowMetricsKeys
cd mekkablue_ShowMetricsKeys
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_ShowMetricsKeys/ShowMetricsKeys.glyphsReporter ShowMetricsKeys.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '======================================='
echo 'Done mekkablue Show Metrics Keys Plugin'
echo '======================================='
git clone https://github.com/mekkablue/ShowOffsetCurveParameterPreview.git mekkablue_ShowOffsetCurveParameterPreview
cd mekkablue_ShowOffsetCurveParameterPreview
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_ShowOffsetCurveParameterPreview/OffsetPreview.glyphsReporter OffsetPreview.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '========================================================='
echo 'Done mekkablue Show Offset Curve Parameter Preview Plugin'
echo '========================================================='
git clone https://github.com/mekkablue/Symmetry.git mekkablue_Symmetry
cd mekkablue_Symmetry
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_Symmetry/Symmetry.glyphsReporter Symmetry.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '=============================='
echo 'Done mekkablue Symmetry Plugin'
echo '=============================='
git clone https://github.com/mekkablue/ShowExportStatus.git mekkablue_ShowExportStatus
cd mekkablue_ShowExportStatus
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_ShowExportStatus/ShowExportStatus.glyphsReporter ShowExportStatus.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '======================================'
echo 'Done mekkablue ShowExportStatus Plugin'
echo '======================================'
git clone https://github.com/mekkablue/ShowDistanceBetweenTwoPoints.git mekkablue_ShowDistanceBetweenTwoPoints
cd mekkablue_ShowDistanceBetweenTwoPoints
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_ShowDistanceBetweenTwoPoints/ShowDistanceBetweenTwoPoints.glyphsReporter ShowDistanceBetweenTwoPoints.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '=================================================='
echo 'Done mekkablue ShowDistanceBetweenTwoPoints Plugin'
echo '=================================================='
git clone https://github.com/schriftgestalt/Autopsy-Plugin.git schriftgestalt_Autopsy-Plugin
cd schriftgestalt_Autopsy-Plugin
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/schriftgestalt_Autopsy-Plugin/Autopsy.glyphsPlugin Autopsy.glyphsPlugin
cd ~/Documents/GlyphsScripts/
echo '=================================='
echo 'Done schriftgestalt Autopsy Plugin'
echo '=================================='
git clone https://github.com/schriftgestalt/Glyphs-Plugins.git schriftgestalt_Glyphs-Plugins
cd schriftgestalt_Glyphs-Plugins
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/schriftgestalt_Glyphs-Plugins/GlyphsExpandPathsPreviewTool.glyphsReporter GlyphsExpandPathsPreviewTool.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '====================================================='
echo 'Done schriftgestalt Expand Paths Preview Tool Plugin'
echo '====================================================='
git clone https://github.com/schriftgestalt/Glyphs-Scripts.git schriftgestalt_Glyphs-Scripts
cd schriftgestalt_Glyphs-Scripts
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
if [ -e "$HOME/Library/Application Support/Glyphs/Scripts/schriftgestalt" ] ; then
rm -r ~/Library/Application\ Support/Glyphs/Scripts/schriftgestalt
fi
mkdir ~/Library/Application\ Support/Glyphs/Scripts/schriftgestalt
cd ~/Library/Application\ Support/Glyphs/Scripts/schriftgestalt/
ln -s ~/Documents/GlyphsScripts/schriftgestalt_Glyphs-Scripts/Autopsy.py Autopsy.py
ln -s ~/Documents/GlyphsScripts/schriftgestalt_Glyphs-Scripts/Delete\ Images.py Delete\ Images.py
ln -s ~/Documents/GlyphsScripts/schriftgestalt_Glyphs-Scripts/Helper/ Helper
ln -s ~/Documents/GlyphsScripts/schriftgestalt_Glyphs-Scripts/Import\ SVG.py Import\ SVG.py
ln -s ~/Documents/GlyphsScripts/schriftgestalt_Glyphs-Scripts/Import\ SVGs2Glyphs.py Import\ SVGs2Glyphs.py
ln -s ~/Documents/GlyphsScripts/schriftgestalt_Glyphs-Scripts/Make\ Unicase\ Font.py Make\ Unicase\ Font.py
ln -s ~/Documents/GlyphsScripts/schriftgestalt_Glyphs-Scripts/MakeProdunctionFont.py MakeProdunctionFont.py
ln -s ~/Documents/GlyphsScripts/schriftgestalt_Glyphs-Scripts/Metrics\ \&\ Classes/ Metrics\ \&\ Classes
ln -s ~/Documents/GlyphsScripts/schriftgestalt_Glyphs-Scripts/Other\ Scripts/ Other\ Scripts
cd ~/Library/Application\ Support/Glyphs/Scripts/
ln -s ~/Documents/GlyphsScripts/schriftgestalt_Glyphs-Scripts/objectsGS.py objectsGS.py
ln -s ~/Documents/GlyphsScripts/schriftgestalt_Glyphs-Scripts/GSPen.py GSPen.py
cd ~/Library/Application\ Support/FontLab/Studio\ 5/Macros/
ln -s ~/Documents/GlyphsScripts/schriftgestalt_Glyphs-Scripts/Glyphs\ Export.py Glyphs\ Export.py
ln -s ~/Documents/GlyphsScripts/schriftgestalt_Glyphs-Scripts/Glyphs\ Import.py Glyphs\ Import.py
cd ~/Documents/GlyphsScripts/
echo '==========================='
echo 'Done schriftgestalt Scripts'
echo '==========================='
git clone https://github.com/schriftgestalt/word-o-mat.git schriftgestalt_word-o-mat
cd schriftgestalt_word-o-mat
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/schriftgestalt_word-o-mat/word-o-mat.glyphsPlugin word-o-mat.glyphsPlugin
cd ~/Documents/GlyphsScripts/
echo '====================================='
echo 'Done schriftgestalt word-o-mat Plugin'
echo '====================================='
git clone https://github.com/schriftgestalt/DrawBotGlyphsPlugin.git schriftgestalt_DrawBotGlyphsPlugin
cd schriftgestalt_DrawBotGlyphsPlugin
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/schriftgestalt_DrawBotGlyphsPlugin/DrawBot.glyphsPlugin DrawBot.glyphsPlugin
cd ~/Documents/GlyphsScripts/
echo '========================================'
echo 'Done schriftgestalt DrawBotGlyphs Plugin'
echo '========================================'
git clone https://github.com/Tosche/Glyphs-Scripts.git Tosche_Glyphs-Scripts
cd Tosche_Glyphs-Scripts
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Scripts/
ln -s ~/Documents/GlyphsScripts/Tosche_Glyphs-Scripts/ Tosche
cd ~/Documents/GlyphsScripts/
echo '==================='
echo 'Done Tosche Scripts'
echo '==================='
git clone https://github.com/Tosche/ShowBlackFill.git Tosche_ShowBlackFill
cd Tosche_ShowBlackFill
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/Tosche_ShowBlackFill/ShowBlackFill.glyphsReporter ShowBlackFill.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '=================================='
echo 'Done Tosche Show Black Fill Plugin'
echo '=================================='
git clone https://github.com/Tosche/BubbleKern.git Tosche_BubbleKern
cd Tosche_BubbleKern
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/Tosche_BubbleKern/ShowKernBubbles.glyphsReporter ShowKernBubbles.glyphsReporter
if [ -e "$HOME/Library/Application Support/Glyphs/Scripts/BubbleKern" ] ; then
rm -r ~/Library/Application\ Support/Glyphs/Scripts/BubbleKern
fi
mkdir ~/Library/Application\ Support/Glyphs/Scripts/BubbleKern
cd ~/Library/Application\ Support/Glyphs/Scripts/BubbleKern/
ln -s ~/Documents/GlyphsScripts/Tosche_BubbleKern/BubbleKern.py BubbleKern.py
ln -s ~/Documents/GlyphsScripts/Tosche_BubbleKern/Delete\ Bubble\ Layers.py Delete\ Bubble\ Layers.py
ln -s ~/Documents/GlyphsScripts/Tosche_BubbleKern/Make\ Bubble\ Layers.py Make\ Bubble\ Layers.py
cd ~/Documents/GlyphsScripts/
echo '======================'
echo 'Done Tosche BubbleKern'
echo '======================'
git clone https://github.com/simoncozens/GlyphsGit.git SimonCozens_GlyphsGit
cd SimonCozens_GlyphsGit
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/SimonCozens_GlyphsGit/GlyphsGit.glyphsPlugin GlyphsGit.glyphsPlugin
cd ~/Documents/GlyphsScripts/
echo '================================='
echo 'Done SimonCozens GlyphsGit Plugin'
echo '================================='
git clone https://github.com/simoncozens/GlyphsPlugins.git SimonCozens_ShowPathArea
cd SimonCozens_ShowPathArea
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/SimonCozens_ShowPathArea/ShowPathArea.glyphsReporter ShowPathArea.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '===================================='
echo 'Done SimonCozens ShowPathArea Plugin'
echo '===================================='
git clone https://github.com/simoncozens/GlyphsScripts.git SimonCozens_Scripts
cd SimonCozens_Scripts
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Scripts/
ln -s ~/Documents/GlyphsScripts/SimonCozens_Scripts/ SimonCozens
cd ~/Documents/GlyphsScripts/
echo '=========================='
echo "Done SimonCozens's Scripts"
echo '=========================='
git clone https://github.com/huertatipografica/huertatipografica-scripts.git HuertaTipografica_Scripts
cd HuertaTipografica_Scripts
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Scripts/
ln -s ~/Documents/GlyphsScripts/HuertaTipografica_Scripts/ HuertaTipografica
cd ~/Documents/GlyphsScripts/
echo '================================='
echo "Done Huerta Tipografica's Scripts"
echo '================================='
git clone https://github.com/weiweihuanghuang/wei-glyphs-scripts.git wei_scripts
cd wei_scripts
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Scripts/
ln -s ~/Documents/GlyphsScripts/wei_scripts/ Wei
cd ~/Documents/GlyphsScripts/
echo '========================'
echo "Done Wei Huang's Scripts"
echo '========================'
git clone https://github.com/Nevu/Nevu_ScriptsForGlyphs.git Nevu_Scripts
cd Nevu_Scripts
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Scripts/
ln -s ~/Documents/GlyphsScripts/Nevu_Scripts/ Nevu
cd ~/Documents/GlyphsScripts/
echo '==================='
echo "Done Nevu's Scripts"
echo '==================='
############################################
##### Start of New Scripts and Filters #####
############################################
git clone https://github.com/guidoferreyra/Glyphs-Scripts.git GuidoFerreyra_Scripts
cd GuidoFerreyra_Scripts
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Scripts/
ln -s ~/Documents/GlyphsScripts/GuidoFerreyra_Scripts/ GuidoFerreyra
cd ~/Documents/GlyphsScripts/
echo '============================='
echo "Done Guido Ferreyra's Scripts"
echo '============================='
git clone https://github.com/guidoferreyra/ShowNextFont.git GuidoFerreyra_ShowNextFont
cd GuidoFerreyra_ShowNextFont
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/GuidoFerreyra_ShowNextFont/showNextFont.glyphsReporter showNextFont.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '======================================'
echo 'Done GuidoFerreyra_ShowNextFont Plugin'
echo '======================================'
git clone https://github.com/guidoferreyra/ShowNextFontAnchors.git GuidoFerreyra_ShowNextFontAnchors
cd GuidoFerreyra_ShowNextFontAnchors
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/GuidoFerreyra_ShowNextFontAnchors/showNextFontAnchors.glyphsReporter showNextFontAnchors.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '============================================='
echo 'Done GuidoFerreyra_ShowNextFontAnchors Plugin'
echo '============================================='
git clone https://github.com/guidoferreyra/showAnchorCoordinates.git GuidoFerreyra_showAnchorCoordinates
cd GuidoFerreyra_showAnchorCoordinates
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/GuidoFerreyra_showAnchorCoordinates/showAnchorCoordinates.glyphsReporter showAnchorCoordinates.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '==============================================='
echo 'Done GuidoFerreyra_showAnchorCoordinates Plugin'
echo '==============================================='
git clone https://github.com/guidoferreyra/showCoordinates.git GuidoFerreyra_showCoordinates
cd GuidoFerreyra_showCoordinates
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/GuidoFerreyra_showCoordinates/showCoordinates.glyphsReporter showCoordinates.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '========================================='
echo 'Done GuidoFerreyra_showCoordinates Plugin'
echo '========================================='
git clone https://github.com/mekkablue/ShowItalic.git mekkablue_ShowItalic
cd mekkablue_ShowItalic
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_ShowItalic/ShowItalic.glyphsReporter ShowItalic.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '================================'
echo 'Done mekkablue ShowItalic Plugin'
echo '================================'
git clone https://github.com/mekkablue/ShowTopsAndBottoms.git mekkablue_ShowTopsAndBottoms
cd mekkablue_ShowTopsAndBottoms
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/mekkablue_ShowTopsAndBottoms/ShowTopsAndBottoms.glyphsReporter ShowTopsAndBottoms.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '========================================'
echo 'Done mekkablue ShowTopsAndBottoms Plugin'
echo '========================================'
git clone https://github.com/Nevu/Show-Global-Glyph.git nevu_ShowGlobalGlyphs
cd nevu_ShowGlobalGlyphs
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/nevu_ShowGlobalGlyphs/GlobalGlyph.glyphsReporter GlobalGlyph.glyphsReporter
cd ~/Documents/GlyphsScripts/
echo '================================'
echo 'Done Nevu ShowGlobalGlyph Plugin'
echo '================================'
git clone https://github.com/ohbendy/Python-scripts-for-Glyphs.git ohbendy_PluginsAndScripts
cd ohbendy_PluginsAndScripts
printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
# First the Plugins
cd ~/Library/Application\ Support/Glyphs/Plugins/
ln -s ~/Documents/GlyphsScripts/ohbendy_PluginsAndScripts/ShowFlippedComponents.glyphsReporter ShowFlippedComponents.glyphsReporter
echo '============================================='
echo "Done ohbendy's Show Flipped Components Plugin"
echo '============================================='
ln -s ~/Documents/GlyphsScripts/ohbendy_PluginsAndScripts/ShowMasterName\&Glyph.glyphsReporter ShowMasterName\&Glyph.glyphsReporter
echo '=============================================='
echo "Done ohbendy's Show Master Name & Glyph Plugin"
echo '=============================================='
cd ~/Documents/GlyphsScripts/
# Then the scripts
cd ~/Library/Application\ Support/Glyphs/Scripts/
ln -s ~/Documents/GlyphsScripts/ohbendy_PluginsAndScripts/ ohBendy
cd ~/Documents/GlyphsScripts/
echo '======================'
echo "Done ohbendy's Scripts"
echo '======================'
##############################
### Testing releases ###
# git clone https://github.com/simoncozens/Callipers.git SimonCozens_Callipers
# cd SimonCozens_Callipers
# latestTag=$(git describe --tags `git rev-list --tags --max-count=1`)
# curl -L -o Callipers-$latestTag.zip https://github.com/simoncozens/Callipers/releases/download/$latestTag/Callipers-$latestTag.zip
# unzip Callipers-$latestTag.zip
# printf '*.vfbak\n*.pyc\n.DS_Store\nREADME.*\nLICENSE.*\n.gitignore\n*.vdiff\nLICENSE\n*png\n' > .gitignore
# printf '*/5 * * * * app cd '$(pwd)' && git fetch -q --all -p\n' >> /tmp/GlyphsScriptsConfi/sync_git_repos
# cd ~/Library/Application\ Support/Glyphs/Plugins/
# ln -s ~/Documents/GlyphsScripts/SimonCozens_Callipers/Callipers.glyphsTool Callipers.glyphsTool
# cd ~/Documents/GlyphsScripts/
# echo '=================================='
# echo 'Done Simon Cozens Callipers Plugin'
# echo '=================================='
### Testing releases ###
##############################
############################################
###### End of New Scripts and Filters ######
############################################
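# Install the cron fragment assembled above so every cloned repo is fetched every 5 minutes.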
sudo ditto /tmp/GlyphsScriptsConfi/sync_git_repos /private/etc/cron.d/sync_git_repos
echo Finished Glyphs Scripts setup
|
zar-nicolas20/GlyphsApp-Package-Install
|
GlyphsScriptsConfi.sh
|
Shell
|
apache-2.0
| 52,318 |
TARGET_ARCH=amd64
KERNCONF=GENERIC
IMAGE_SIZE=$((2000 * 1000 * 1000))
generic_amd64_partition_image ( ) {
# TODO: create UEFI disk
# basic setup
disk_partition_mbr
disk_ufs_create
disk_ufs_label 1 bsdrootfs || exit 1
# boot loader
echo "Installing bootblocks($TARGET_ARCH)"
# TODO: This is broken; should use 'make install' to copy
# bootfiles to workdir, then install to disk image from there.
BOOTFILES=${FREEBSD_OBJDIR}/sys/boot/i386
echo "Boot files are at: "${BOOTFILES}
gpart bootcode -b ${BOOTFILES}/mbr/mbr ${DISK_MD} || exit 1
gpart set -a active -i 1 ${DISK_MD} || exit 1
echo "befor bsdlabel"
gpart show ${DISK_MD}
gpart show ${NEW_UFS_SLICE}
bsdlabel -w -B -b ${BOOTFILES}/boot2/boot `disk_ufs_partition` || exit 1
# show the disk
echo "Installing bootblocks($TARGET_ARCH) done, bsdlabel to `disk_ufs_partition`"
gpart show ${DISK_MD}
gpart show ${NEW_UFS_SLICE}
}
strategy_add $PHASE_PARTITION_LWW generic_amd64_partition_image
# Kernel installs in UFS partition
strategy_add $PHASE_FREEBSD_BOARD_INSTALL board_default_installkernel .
|
wheelcomplex/crochet-freebsd
|
board/GenericAMD64/setup.sh
|
Shell
|
bsd-2-clause
| 1,174 |
#!/bin/bash
_DIR=$(dirname "${BASH_SOURCE[0]}")
echo pushd ${_DIR}/..
pushd ${_DIR}/..
pub run test:test -p vm -p dartium -r expanded
|
alextekartik/mdl_js_react.dart
|
test/run_all.sh
|
Shell
|
bsd-2-clause
| 125 |
#!/bin/sh
set -e
uname -a
${CC} --version
python --version
pip --version
./configure
make
make clean
#sudo pip install -r requirements.txt
#sudo DEBIAN_FRONTEND=noninteractive apt-get update
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y libgsm1-dev libsndfile1-dev tcpdump curl
tcpdump --version || true
mkdir deps
cd deps
wget http://download-mirror.savannah.gnu.org/releases/linphone/plugins/sources/bcg729-1.0.0.tar.gz
tar xfz bcg729-1.0.0.tar.gz
cd bcg729-1.0.0
perl -pi -e 's|BASICOPERATIONSMACROS__H|BASICOPERATIONSMACROS_H|g' include/basicOperationsMacros.h
./configure
make
sudo make install
cd ..
git clone git://github.com/sippy/libg722 libg722
cd libg722
make
sudo make install
cd ../..
sudo ldconfig
autoreconf --force --install --verbose
./configure
make
TEST_WITNESS_ENABLE=yes make check || (cat tests/test-suite.log; exit 1)
|
hongbinz/rtpproxy
|
scripts/do-test.sh
|
Shell
|
bsd-2-clause
| 849 |
#!/bin/bash
PWD="/usr/src/nanny/backend"
export PYTHONIOENCODING=utf-8
cd $PWD
DATE=`date +"%Y%m%d"`
python crawler.py nanny_$DATE.sqlite > log_$DATE.txt
if [ $? = "0" ]; then
cp nanny_$DATE.sqlite nanny.sqlite
fi
|
Shihta/nanny
|
config/crawler.sh
|
Shell
|
bsd-3-clause
| 222 |
#!/bin/sh
# First parameter gives config file
echo "Parametry $@"
echo "Pocet parametru $#"
add=""
add2=""
if [ $# -eq 1 ]; then
echo "in iff"
add="-cf"
add2="$1"
fi
echo "add $add"
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig001.mhd-exp010-seeds.pklz
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig002.mhd-exp010-seeds.pklz
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig003.mhd-exp010-seeds.pklz
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig004.mhd-exp010-seeds.pklz
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig005.mhd-exp010-seeds.pklz
echo "5"
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig006.mhd-exp010-seeds.pklz
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig007.mhd-exp010-seeds.pklz
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig008.mhd-exp010-seeds.pklz
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig009.mhd-exp010-seeds.pklz
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig010.mhd-exp010-seeds.pklz
echo "10"
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig011.mhd-exp010-seeds.pklz
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig012.mhd-exp010-seeds.pklz
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig013.mhd-exp010-seeds.pklz
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig014.mhd-exp010-seeds.pklz
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig015.mhd-exp010-seeds.pklz
echo "15"
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig016.mhd-exp010-seeds.pklz
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig017.mhd-exp010-seeds.pklz
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig018.mhd-exp010-seeds.pklz
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig019.mhd-exp010-seeds.pklz
python lisa.py -ni -dd ~/data/medical/processed/spring2014/exp010-seeds/org-liver-orig020.mhd-exp010-seeds.pklz
|
mjirik/lisa
|
experiments/exp-spring2014.sh
|
Shell
|
bsd-3-clause
| 2,468 |
#!/bin/sh
git filter-branch --env-filter '
OLD_EMAIL="[email protected]"
CORRECT_NAME="Your Correct Name"
CORRECT_EMAIL="[email protected]"
if [ "$GIT_COMMITTER_EMAIL" = "$OLD_EMAIL" ]
then
export GIT_COMMITTER_NAME="$CORRECT_NAME"
export GIT_COMMITTER_EMAIL="$CORRECT_EMAIL"
fi
if [ "$GIT_AUTHOR_EMAIL" = "$OLD_EMAIL" ]
then
export GIT_AUTHOR_NAME="$CORRECT_NAME"
export GIT_AUTHOR_EMAIL="$CORRECT_EMAIL"
fi
' --tag-name-filter cat -- --branches --tags
# Afterwards do:
# git push --force --tags origin 'refs/heads/*'
|
herrbischoff/dotfiles
|
home/bin/git-author-rewrite.sh
|
Shell
|
bsd-3-clause
| 560 |
#!/usr/bin/env bash
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# The syscall package provides access to the raw system call
# interface of the underlying operating system. Porting Go to
# a new architecture/operating system combination requires
# some manual effort, though there are tools that automate
# much of the process. The auto-generated files have names
# beginning with z.
#
# This script runs or (given -n) prints suggested commands to generate z files
# for the current system. Running those commands is not automatic.
# This script is documentation more than anything else.
#
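# For example, a dry run on a hypothetical linux/amd64 machine would be
# something like:
#
#   GOOS=linux GOARCH=amd64 ./mkall.sh -n
#
# which only prints the mkerrors/mksyscall/mksysnum/cgo command lines
# instead of running them.
#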
# * asm_${GOOS}_${GOARCH}.s
#
# This hand-written assembly file implements system call dispatch.
# There are three entry points:
#
# func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
# func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr);
# func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
#
# The first and second are the standard ones; they differ only in
# how many arguments can be passed to the kernel.
# The third is for low-level use by the ForkExec wrapper;
# unlike the first two, it does not call into the scheduler to
# let it know that a system call is running.
#
# * syscall_${GOOS}.go
#
# This hand-written Go file implements system calls that need
# special handling and lists "//sys" comments giving prototypes
# for ones that can be auto-generated. Mksyscall reads those
# comments to generate the stubs.
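# A typical "//sys" comment looks roughly like this (illustrative example):
#
#   //sys   Mkdirat(dirfd int, path string, mode uint32) (err error)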
#
# * syscall_${GOOS}_${GOARCH}.go
#
# Same as syscall_${GOOS}.go except that it contains code specific
# to ${GOOS} on one particular architecture.
#
# * types_${GOOS}.c
#
# This hand-written C file includes standard C headers and then
# creates typedef or enum names beginning with a dollar sign
# (use of $ in variable names is a gcc extension). The hardest
# part about preparing this file is figuring out which headers to
# include and which symbols need to be #defined to get the
# actual data structures that pass through to the kernel system calls.
# Some C libraries present alternate versions for binary compatibility
# and translate them on the way in and out of system calls, but
# there is almost always a #define that can get the real ones.
# See types_darwin.c and types_linux.c for examples.
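# A minimal sketch of the convention (illustrative only, not taken from a
# real types_*.c file):
#
#   #include <sys/stat.h>
#   typedef struct stat $Stat_t;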
#
# * zerror_${GOOS}_${GOARCH}.go
#
# This machine-generated file defines the system's error numbers,
# error strings, and signal numbers. The generator is "mkerrors.sh".
# Usually no arguments are needed, but mkerrors.sh will pass its
# arguments on to godefs.
#
# * zsyscall_${GOOS}_${GOARCH}.go
#
# Generated by mksyscall.pl; see syscall_${GOOS}.go above.
#
# * zsysnum_${GOOS}_${GOARCH}.go
#
# Generated by mksysnum_${GOOS}.
#
# * ztypes_${GOOS}_${GOARCH}.go
#
# Generated by godefs; see types_${GOOS}.c above.
GOOSARCH="${GOOS}_${GOARCH}"
# defaults
mksyscall="./mksyscall.pl"
mkerrors="./mkerrors.sh"
zerrors="zerrors_$GOOSARCH.go"
mksysctl=""
zsysctl="zsysctl_$GOOSARCH.go"
mksysnum=
mktypes=
mkasm=
run="sh"
case "$1" in
-syscalls)
for i in zsyscall*go
do
# Run the command line that appears in the first line
# of the generated file to regenerate it.
sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
rm _$i
done
exit 0
;;
-n)
run="cat"
shift
esac
case "$#" in
0)
;;
*)
echo 'usage: mkall.sh [-n]' 1>&2
exit 2
esac
GOOSARCH_in=syscall_$GOOSARCH.go
case "$GOOSARCH" in
_* | *_ | _)
echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
exit 1
;;
aix_ppc64)
mkerrors="$mkerrors -maix64"
mksyscall="./mksyscall_libc.pl -aix"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
darwin_amd64)
mkerrors="$mkerrors -m64"
mksyscall="./mksyscall.pl -darwin"
mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
mkasm="go run mkasm_darwin.go"
;;
darwin_arm64)
mkerrors="$mkerrors -m64"
mksyscall="./mksyscall.pl -darwin"
mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
mkasm="go run mkasm_darwin.go"
;;
dragonfly_amd64)
mkerrors="$mkerrors -m64"
mksyscall="./mksyscall.pl -dragonfly"
mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
freebsd_386)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32"
mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
freebsd_amd64)
mkerrors="$mkerrors -m64"
mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
freebsd_arm)
mkerrors="$mkerrors"
mksyscall="./mksyscall.pl -l32 -arm"
mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
# Let the type of C char be signed to make the bare syscall
# API consistent between platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
freebsd_arm64)
mkerrors="$mkerrors"
mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
# Let the type of C char be signed to make the bare syscall
# API consistent between platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
linux_386)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32"
mksysnum="./mksysnum_linux.pl /usr/include/asm/unistd_32.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
linux_amd64)
unistd_h=$(ls -1 /usr/include/asm/unistd_64.h /usr/include/x86_64-linux-gnu/asm/unistd_64.h 2>/dev/null | head -1)
if [ "$unistd_h" = "" ]; then
echo >&2 cannot find unistd_64.h
exit 1
fi
mkerrors="$mkerrors -m64"
mksysnum="./mksysnum_linux.pl $unistd_h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
linux_arm)
mkerrors="$mkerrors"
mksyscall="./mksyscall.pl -l32 -arm"
mksysnum="curl -s 'http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/plain/arch/arm/include/uapi/asm/unistd.h' | ./mksysnum_linux.pl -"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
linux_arm64)
unistd_h=$(ls -1 /usr/include/asm/unistd.h /usr/include/asm-generic/unistd.h 2>/dev/null | head -1)
if [ "$unistd_h" = "" ]; then
	echo >&2 cannot find unistd.h
exit 1
fi
mksysnum="./mksysnum_linux.pl $unistd_h"
# Let the type of C char be signed to make the bare syscall
# API consistent between platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
linux_mips)
GOOSARCH_in=syscall_linux_mipsx.go
unistd_h=/usr/include/asm/unistd.h
mksyscall="./mksyscall.pl -b32 -arm"
mkerrors="$mkerrors"
mksysnum="./mksysnum_linux.pl $unistd_h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
linux_mipsle)
GOOSARCH_in=syscall_linux_mipsx.go
unistd_h=/usr/include/asm/unistd.h
mksyscall="./mksyscall.pl -l32 -arm"
mkerrors="$mkerrors"
mksysnum="./mksysnum_linux.pl $unistd_h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
linux_mips64)
GOOSARCH_in=syscall_linux_mips64x.go
unistd_h=/usr/include/asm/unistd.h
mkerrors="$mkerrors -m64"
mksysnum="./mksysnum_linux.pl $unistd_h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
linux_mips64le)
GOOSARCH_in=syscall_linux_mips64x.go
unistd_h=/usr/include/asm/unistd.h
mkerrors="$mkerrors -m64"
mksysnum="./mksysnum_linux.pl $unistd_h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
linux_ppc64)
GOOSARCH_in=syscall_linux_ppc64x.go
unistd_h=/usr/include/asm/unistd.h
mkerrors="$mkerrors -m64"
mksysnum="./mksysnum_linux.pl $unistd_h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
linux_ppc64le)
GOOSARCH_in=syscall_linux_ppc64x.go
unistd_h=/usr/include/powerpc64le-linux-gnu/asm/unistd.h
mkerrors="$mkerrors -m64"
mksysnum="./mksysnum_linux.pl $unistd_h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
linux_riscv64)
unistd_h=$(ls -1 /usr/include/asm/unistd.h /usr/include/asm-generic/unistd.h 2>/dev/null | head -1)
if [ "$unistd_h" = "" ]; then
	echo >&2 cannot find unistd.h
exit 1
fi
mksysnum="./mksysnum_linux.pl $unistd_h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
linux_s390x)
GOOSARCH_in=syscall_linux_s390x.go
unistd_h=/usr/include/asm/unistd.h
mkerrors="$mkerrors -m64"
mksysnum="./mksysnum_linux.pl $unistd_h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
netbsd_386)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32 -netbsd"
mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
netbsd_amd64)
mkerrors="$mkerrors -m64"
mksyscall="./mksyscall.pl -netbsd"
mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
netbsd_arm)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32 -netbsd -arm"
mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
netbsd_arm64)
mkerrors="$mkerrors -m64"
mksyscall="./mksyscall.pl -netbsd"
mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_386)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32 -openbsd"
mksysctl="./mksysctl_openbsd.pl"
zsysctl="zsysctl_openbsd.go"
mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_amd64)
mkerrors="$mkerrors -m64"
mksyscall="./mksyscall.pl -openbsd"
mksysctl="./mksysctl_openbsd.pl"
zsysctl="zsysctl_openbsd.go"
mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_arm)
mkerrors="$mkerrors"
mksyscall="./mksyscall.pl -l32 -openbsd -arm"
mksysctl="./mksysctl_openbsd.pl"
zsysctl="zsysctl_openbsd.go"
mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
# Let the type of C char be signed to make the bare syscall
# API consistent between platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
openbsd_arm64)
mkerrors="$mkerrors -m64"
mksyscall="./mksyscall.pl -openbsd"
mksysctl="./mksysctl_openbsd.pl"
zsysctl="zsysctl_openbsd.go"
mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
# Let the type of C char be signed to make the bare syscall
# API consistent between platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
plan9_386)
mkerrors=
mksyscall="./mksyscall.pl -l32 -plan9"
mksysnum="./mksysnum_plan9.sh /n/sources/plan9/sys/src/libc/9syscall/sys.h"
mktypes="XXX"
;;
solaris_amd64)
mksyscall="./mksyscall_libc.pl -solaris"
mkerrors="$mkerrors -m64"
mksysnum=
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
windows_*)
echo 'run "go generate" instead' 1>&2
exit 1
;;
*)
echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
exit 1
;;
esac
(
if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
syscall_goos="syscall_$GOOS.go"
case "$GOOS" in
darwin | dragonfly | freebsd | netbsd | openbsd)
syscall_goos="syscall_bsd.go $syscall_goos"
;;
esac
if [ -n "$mksyscall" ]; then echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi
if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
if [ -n "$mktypes" ]; then
# ztypes_$GOOSARCH.go could be erased before "go run mkpost.go" is called.
# Therefore, "go run" tries to recompile syscall package but ztypes is empty and it fails.
echo "$mktypes types_$GOOS.go |go run mkpost.go >ztypes_$GOOSARCH.go.NEW && mv ztypes_$GOOSARCH.go.NEW ztypes_$GOOSARCH.go";
fi
if [ -n "$mkasm" ]; then echo "$mkasm $GOARCH"; fi
) | $run
|
akutz/go
|
src/syscall/mkall.sh
|
Shell
|
bsd-3-clause
| 12,348 |
sed -e "s/^.*;//; s/,/ /g" $1|while read line || [ -n "$line" ]; do
a=( $line )
echo "${a[*]}"|tr " " "\n"|sort|uniq -d
done
|
nikai3d/ce-challenges
|
moderate/array_absurdity.bash
|
Shell
|
bsd-3-clause
| 129 |
#!/bin/bash
sed -i 's|127.0.0.1|'"${DBHOST}"'|g' app/config.app.php
sed -i 's|".*/FrameworkBenchmarks/php-pimf|"'"${TROOT}"'|g' deploy/php-pimf
sed -i 's|Directory .*/FrameworkBenchmarks/php-pimf|Directory '"${TROOT}"'|g' deploy/php-pimf
sed -i 's|root .*/FrameworkBenchmarks/php-pimf|root '"${TROOT}"'|g' deploy/php-pimf
sed -i 's|/usr/local/nginx/|'"${IROOT}"'/nginx/|g' deploy/nginx.conf
export PATH="$COMPOSER_HOME:$PHP_HOME/bin:$PHP_HOME/sbin:$PATH"
composer.phar install --optimize-autoloader
$PHP_FPM --fpm-config $FWROOT/config/php-fpm.conf -g $TROOT/deploy/php-fpm.pid
$NGINX_HOME/sbin/nginx -c $TROOT/deploy/nginx.conf
|
kellabyte/FrameworkBenchmarks
|
frameworks/PHP/php-pimf/setup.sh
|
Shell
|
bsd-3-clause
| 632 |
#!/bin/bash
# Driver script if you're using a virtualenv called 'env' and want to
# activate it.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
pushd $DIR > /dev/null
source env/bin/activate
pushd src > /dev/null
exec ./tw.py "$@"
popd > /dev/null
popd > /dev/null
|
wingu/tracewhack
|
tw.sh
|
Shell
|
bsd-3-clause
| 277 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=GNU-Linux-x86
CND_CONF=Debug
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=so
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/testpreparation
OUTPUT_BASENAME=testpreparation
PACKAGE_TOP_DIR=testpreparation/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/testpreparation/bin"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/testpreparation.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/testpreparation.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
Rossoner40/NBU-Classwork-and-Homework
|
OOP/TestPreparation/nbproject/Package-Debug.bash
|
Shell
|
mit
| 1,491 |
#!/bin/bash
# ex40.sh (burn-cd.sh)
# Script to automate burning a CDR.
SPEED=10 # May use higher speed if your hardware supports it.
IMAGEFILE=cdimage.iso
CONTENTSFILE=contents
# DEVICE=/dev/cdrom For older versions of cdrecord
DEVICE="1,0,0"
DEFAULTDIR=/opt # This is the directory containing the data to be burned.
# Make sure it exists.
# Exercise: Add a test for this.
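# One possible answer to the exercise above (a sketch, left commented out so
# the script's behavior is unchanged):
#   if [ ! -d "$DEFAULTDIR" ]; then
#     echo "Error: $DEFAULTDIR does not exist." >&2
#     exit 1
#   fi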
# Uses Joerg Schilling's "cdrecord" package:
# http://www.fokus.fhg.de/usr/schilling/cdrecord.html
# If this script invoked as an ordinary user, may need to suid cdrecord
#+ chmod u+s /usr/bin/cdrecord, as root.
# Of course, this creates a security hole, though a relatively minor one.
if [ -z "$1" ]
then
IMAGE_DIRECTORY=$DEFAULTDIR
# Default directory, if not specified on command-line.
else
IMAGE_DIRECTORY=$1
fi
# Create a "table of contents" file.
ls -lRF $IMAGE_DIRECTORY > $IMAGE_DIRECTORY/$CONTENTSFILE
# The "l" option gives a "long" file listing.
# The "R" option makes the listing recursive.
# The "F" option marks the file types (directories get a trailing /).
echo "Creating table of contents."
# Create an image file preparatory to burning it onto the CDR.
mkisofs -r -o $IMAGEFILE $IMAGE_DIRECTORY
echo "Creating ISO9660 file system image ($IMAGEFILE)."
# Burn the CDR.
echo "Burning the disk."
echo "Please be patient, this will take a while."
wodim -v -isosize dev=$DEVICE $IMAGEFILE
# In newer Linux distros, the "wodim" utility assumes the
#+ functionality of "cdrecord."
exitcode=$?
echo "Exit code = $exitcode"
exit $exitcode
|
yimng/LEARN
|
abs/16/burn-cd.sh
|
Shell
|
mit
| 1,587 |
#!/bin/bash
# +---------------------------------------------------------------------------+
# | script for deploying a Seagull release |
# +---------------------------------------------------------------------------+
# args
REVISION_NUM=$1
PREVIOUS_REVISION_NUM=$(($REVISION_NUM - 1))
MODE_TEST=$2
USER=demian
DOMAIN=example.com
PROJECT_NAME=Seagull
UPLOADED_TARBALL=/tmp/$PROJECT_NAME-$REVISION_NUM.tar.gz
SEAGULL_DIR=/var/www/html/seagull/branches/0.6-bugfix
##############################
# check if this is a staging deployment
##############################
if [ -z $MODE_TEST ] ; then
IS_LIVE=0;
else
IS_LIVE=1;
fi
##############################
# dynamically set paths
##############################
if (( $IS_LIVE )) ; then
DEPLOY_DIR=/var/www/html/sgl_deploy_live
STATIC_DIR=/var/www/html/sgl_deploy_live/123
else
DEPLOY_DIR=/var/www/html/sgl_deploy_staging
STATIC_DIR=/var/www/html/sgl_deploy_staging/999
fi
##############################
# unzip tarball
##############################
cd /tmp
tar xvzf $UPLOADED_TARBALL
rm -f $UPLOADED_TARBALL
##############################
# check that we are not overwriting
##############################
if [ -d "$DEPLOY_DIR/$REVISION_NUM" ]; then
echo "cannot overwrite existing deployment";
exit 1;
fi
##############################
# move folder to deploy dir
##############################
mv $PROJECT_NAME-$REVISION_NUM $DEPLOY_DIR/$REVISION_NUM
##############################
# symlink images and var dirs
##############################
ln -s $STATIC_DIR/var $DEPLOY_DIR/$REVISION_NUM/var
rm -rf $DEPLOY_DIR/$REVISION_NUM/www/images/Image
ln -s $STATIC_DIR/www/images/Image $DEPLOY_DIR/$REVISION_NUM/www/images/Image
##############################
# make various dirs writable
##############################
chmod 777 $DEPLOY_DIR/$REVISION_NUM/www
##############################
# symlink in foo web resources
##############################
ln -s $DEPLOY_DIR/$REVISION_NUM/modulesFOO/foo/www $DEPLOY_DIR/$REVISION_NUM/www/foo
##############################
# symlink in Seagull repo
##############################
ln -s $SEAGULL_DIR/modules $DEPLOY_DIR/$REVISION_NUM/modules
ln -s $SEAGULL_DIR/modules/block $DEPLOY_DIR/$REVISION_NUM/modulesFOO/block
ln -s $SEAGULL_DIR/modules/default $DEPLOY_DIR/$REVISION_NUM/modulesFOO/default
ln -s $SEAGULL_DIR/modules/navigation $DEPLOY_DIR/$REVISION_NUM/modulesFOO/navigation
ln -s $SEAGULL_DIR/modules/translation $DEPLOY_DIR/$REVISION_NUM/modulesFOO/translation
ln -s $SEAGULL_DIR/lib/SGL.php $DEPLOY_DIR/$REVISION_NUM/lib/SGL.php
ln -s $SEAGULL_DIR/lib/SGL $DEPLOY_DIR/$REVISION_NUM/lib/SGL
ln -s $SEAGULL_DIR/lib/data $DEPLOY_DIR/$REVISION_NUM/lib/data
ln -s $SEAGULL_DIR/lib/pear $DEPLOY_DIR/$REVISION_NUM/lib/pear
|
demianturner/seagullframework-sandbox
|
etc/deploy_remote.sh
|
Shell
|
mit
| 2,804 |
#!/usr/bin/env bash
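# Replace everything up to and including the "${TOOLCHAIN}-" prefix in $0 with
# /usr/bin/x86_64-linux-gnu-, then exec that native tool with -m32 so it
# produces 32-bit (i686) output.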
exec ${0/*${TOOLCHAIN}-/\/usr\/bin\/x86_64-linux-gnu-} -m32 "$@"
|
dockcross/dockcross
|
linux-x86/i686-linux-gnu.sh
|
Shell
|
mit
| 85 |
#!/bin/bash
mkdir -p tmp/rivers
cargo build
PORT=3100 cargo run &
JOB=$!
sleep 1
cargo test && kill $JOB
|
tempbottle/john
|
build.sh
|
Shell
|
mit
| 108 |
#!/bin/bash
# Before executing this script make sure you have the 64 bit debian package installed.
cat safecheckerfiles.txt | zip safechecker-linux-64.zip -@
zip -r safechecker-linux-64.zip /opt/safechecker
|
proxysh/Safejumper-for-Mac
|
buildlinux/makezipsafechecker64.sh
|
Shell
|
gpl-2.0
| 207 |
#!/bin/sh
# Script to download and install Flash Player.
# Only works on Intel systems.
dmgfile="flash.dmg"
volname="Flash"
logfile="/Library/Logs/FlashUpdateScript.log"
# Are we running on Intel?
if [ "`/usr/bin/uname -p`" = "i386" -o "`/usr/bin/uname -p`" = "x86_64" ]; then
# Get the latest version of Flash Player available from Adobe's About Flash page.
latestver=`/usr/bin/curl -s http://www.adobe.com/software/flash/about/ | /usr/bin/grep -A2 "Macintosh - OS X" | /usr/bin/grep -A1 "Safari" | /usr/bin/sed -e 's/<[^>][^>]*>//g' -e '/^ *$/d' | /usr/bin/tail -n 1 | /usr/bin/awk '{print $1}'`
# Get the version number of the currently-installed Flash Player, if any.
if [ -e "/Library/Internet Plug-Ins/Flash Player.plugin" ]; then
currentinstalledver=`/usr/bin/defaults read /Library/Internet\ Plug-Ins/Flash\ Player.plugin/Contents/version CFBundleShortVersionString`
else
currentinstalledver="none"
fi
# Compare the two versions; if they differ or Flash is not present, download and install the new version.
if [ "${currentinstalledver}" != "${latestver}" ]; then
/bin/echo "`date`: Current Flash version: ${currentinstalledver}" >> ${logfile}
/bin/echo "`date`: Available Flash version: ${latestver}" >> ${logfile}
/bin/echo "`date`: Downloading newer version." >> ${logfile}
/usr/bin/curl -s -o `/usr/bin/dirname $0`/flash.dmg http://fpdownload.macromedia.com/get/flashplayer/current/install_flash_player_osx_intel.dmg
/bin/echo "`date`: Mounting installer disk image." >> ${logfile}
/usr/bin/hdiutil attach `dirname $0`/flash.dmg -nobrowse -quiet
/bin/echo "`date`: Installing..." >> ${logfile}
/usr/sbin/installer -pkg /Volumes/Flash\ Player/Install\ Adobe\ Flash\ Player.app/Contents/Resources/Adobe\ Flash\ Player.pkg -target / > /dev/null
/bin/sleep 10
/bin/echo "`date`: Unmounting installer disk image." >> ${logfile}
/usr/bin/hdiutil detach $(/bin/df | /usr/bin/grep ${volname} | awk '{print $1}') -quiet
/bin/sleep 10
/bin/echo "`date`: Deleting disk image." >> ${logfile}
/bin/rm `/usr/bin/dirname $0`/${dmgfile}
newlyinstalledver=`/usr/bin/defaults read /Library/Internet\ Plug-Ins/Flash\ Player.plugin/Contents/version CFBundleShortVersionString`
if [ "${latestver}" = "${newlyinstalledver}" ]; then
/bin/echo "`date`: SUCCESS: Flash has been updated to version ${newlyinstalledver}" >> ${logfile}
else
/bin/echo "`date`: ERROR: Flash update unsuccessful, version remains at ${currentinstalledver}." >> ${logfile}
/bin/echo "--" >> ${logfile}
fi
# If Flash is up to date already, just log it and exit.
else
/bin/echo "`date`: Flash is already up to date, running ${currentinstalledver}." >> ${logfile}
/bin/echo "--" >> ${logfile}
fi
else
/bin/echo "`date`: ERROR: This script is for Intel Macs only." >> ${logfile}
fi
|
fproject/f-project
|
installFlash.sh
|
Shell
|
gpl-2.0
| 2,867 |
#!/bin/sh
# Copyright (C) 2014 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
. lib/inittest
test -e LOCAL_LVMETAD || skip
aux prepare_pvs 2
pvs --config 'devices { filter = [ "r%.*%" ] }' 2>&1 | grep rejected
pvs --config 'devices { filter = [ "r%.*%" ] }' 2>&1 | not grep 'No device found'
|
akiradeveloper/lvm2
|
test/shell/lvmetad-client-filter.sh
|
Shell
|
gpl-2.0
| 677 |
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (c) 2014-2018 Oracle and/or its affiliates. All Rights Reserved.
# Copyright (c) 2018 Petr Vorel <[email protected]>
# Author: Alexey Kodanev [email protected]
TST_SETUP="dhcp_lib_setup"
TST_CLEANUP="dhcp_lib_cleanup"
TST_TESTFUNC="test01"
TST_NEEDS_TMPDIR=1
TST_NEEDS_ROOT=1
TST_NEEDS_CMDS="cat $dhcp_name awk ip pgrep pkill dhclient"
. tst_net.sh
. daemonlib.sh
iface0="ltp_veth0"
iface1="ltp_veth1"
stop_dhcp()
{
[ "$(pgrep -x $dhcp_name)" ] || return 0
tst_res TINFO "stopping $dhcp_name"
local count=0
while [ $count -le 10 ]; do
pkill -x $dhcp_name
[ "$(pgrep -x $dhcp_name)" ] || return 0
tst_sleep 100ms
count=$((count + 1))
done
pkill -9 -x $dhcp_name
tst_sleep 100ms
[ "$(pgrep -x $dhcp_name)" ] && return 1 || return 0
}
dhcp_lib_setup()
{
if [ $HAVE_SYSTEMCTL -eq 1 ] && \
systemctl --no-pager -p Id show network.service | grep -q Id=wicked.service; then
[ $TST_IPV6 ] && tst_brk TCONF "wicked not supported on IPv6"
is_wicked=1
fi
[ -z "$log" ] && log="$PWD/$(basename $0 '.sh').log"
if [ $TST_IPV6 ]; then
ip_addr="fd00:1:1:2::12/64"
ip_addr_check_noprefix="fd00:1:1:2::100"
ip_addr_check="$ip_addr_check_noprefix/128"
else
ip_addr="10.1.1.12/24"
ip_addr_check_noprefix="10.1.1.100"
ip_addr_check="$ip_addr_check_noprefix/24"
fi
lsmod | grep -q '^veth ' && veth_loaded=yes || veth_loaded=no
tst_res TINFO "create veth interfaces"
ip link add $iface0 type veth peer name $iface1 || \
tst_brk TBROK "failed to add veth $iface0"
veth_added=1
ip link set up $iface0 || tst_brk TBROK "failed to bring $iface0 up"
ip link set up $iface1 || tst_brk TBROK "failed to bring $iface1 up"
stop_dhcp || tst_brk TBROK "Failed to stop dhcp server"
dhclient_lease="/var/lib/dhclient/dhclient${TST_IPV6}.leases"
[ -f $dhclient_lease ] || dhclient_lease="/var/lib/dhcp/dhclient${TST_IPV6}.leases"
if [ -f $dhclient_lease ]; then
tst_res TINFO "backup dhclient${TST_IPV6}.leases"
mv $dhclient_lease .
fi
tst_res TINFO "add $ip_addr to $iface0"
ip addr add $ip_addr dev $iface0 || \
tst_brk TBROK "failed to add ip address"
if [ ! -d "$lease_dir" ]; then
mkdir -p $lease_dir
lease_dir_added=1
fi
}
dhcp_lib_cleanup()
{
[ -z "$veth_loaded" ] && return
[ "$lease_dir_added" = 1 ] && rm -rf $lease_dir
rm -f $lease_file
stop_dhcp
pkill -f "dhclient -$TST_IPVER $iface1"
cleanup_dhcp
# restore dhclient leases
[ $dhclient_lease ] && rm -f $dhclient_lease
[ -f "dhclient${TST_IPV6}.leases" ] && \
mv dhclient${TST_IPV6}.leases $dhclient_lease
[ $veth_added ] && ip link del $iface0
[ "$veth_loaded" = "no" ] && lsmod | grep -q '^veth ' && rmmod veth
}
print_dhcp_log()
{
[ -f "$log" ] && cat $log
}
test01()
{
local wicked_cfg="/etc/sysconfig/network/ifcfg-$iface1"
local wicked_cleanup
tst_res TINFO "testing DHCP server $dhcp_name: $(print_dhcp_version)"
tst_res TINFO "using DHCP client: $(dhclient --version 2>&1)"
tst_res TINFO "starting DHCPv$TST_IPVER server on $iface0"
start_dhcp$TST_IPV6
if [ $? -ne 0 ]; then
print_dhcp_log
tst_brk TBROK "Failed to start $dhcp_name"
fi
sleep 1
if [ "$(pgrep '$dhcp_name')" ]; then
print_dhcp_log
tst_brk TBROK "Failed to start $dhcp_name"
fi
if [ "$is_wicked" ]; then
tst_res TINFO "wicked is running, don't start dhclient"
if [ ! -f "$wicked_cfg" ]; then
cat <<EOF > $wicked_cfg
BOOTPROTO='dhcp'
NAME='LTP card'
STARTMODE='auto'
USERCONTROL='no'
EOF
wicked_cleanup=1
else
tst_res TINFO "wicked config file $wicked_cfg already exist"
fi
tst_res TINFO "restarting wicked"
systemctl restart wicked
else
tst_res TINFO "starting dhclient -$TST_IPVER $iface1"
dhclient -$TST_IPVER $iface1 || tst_brk TBROK "dhclient failed"
fi
# check that we get configured ip address
ip addr show $iface1 | grep -q $ip_addr_check
if [ $? -eq 0 ]; then
tst_res TPASS "'$ip_addr_check' configured by DHCPv$TST_IPVER"
else
if ip addr show $iface1 | grep -q $ip_addr_check_noprefix; then
tst_res TFAIL "'$ip_addr_check_noprefix' configured but has wrong prefix, expect '$ip_addr_check'"
ip addr show $iface1
else
tst_res TFAIL "'$ip_addr_check' not configured by DHCPv$TST_IPVER"
print_dhcp_log
fi
fi
[ "$wicked_cleanup" ] && rm -f $wicked_cfg
stop_dhcp
}
|
pevik/ltp
|
testcases/network/dhcp/dhcp_lib.sh
|
Shell
|
gpl-2.0
| 4,334 |
#!/bin/bash
make distclean >/dev/null 2>&1
./configure \
--with-pic \
--disable-static \
--enable-shared \
--disable-directx \
--disable-sdl \
--without-x
make
strip libmpeg2/.libs/*.dll
|
xbmc/atv2
|
xbmc/cores/dvdplayer/Codecs/libmpeg2/make-xbmc-lib-win32.sh
|
Shell
|
gpl-2.0
| 206 |
# do nothing...
|
plesiv/hac
|
hac/config/runner/py.exec_compile.5.sh
|
Shell
|
gpl-2.0
| 16 |
#! /bin/sh
. ../../testenv.sh
analyze_failure repro.vhdl
analyze_failure repro2.vhdl
clean test
echo "Test successful"
|
tgingold/ghdl
|
testsuite/gna/issue9/testsuite.sh
|
Shell
|
gpl-2.0
| 124 |
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (c) 2009 IBM Corporation
# Copyright (c) 2018-2020 Petr Vorel <[email protected]>
# Author: Mimi Zohar <[email protected]>
TST_TESTFUNC="test"
TST_SETUP_CALLER="$TST_SETUP"
TST_SETUP="ima_setup"
TST_CLEANUP_CALLER="$TST_CLEANUP"
TST_CLEANUP="ima_cleanup"
TST_NEEDS_ROOT=1
# TST_NEEDS_DEVICE can be unset, therefore specify explicitly
TST_NEEDS_TMPDIR=1
. tst_test.sh
SYSFS="/sys"
UMOUNT=
TST_FS_TYPE="ext3"
# TODO: find support for rmd128 rmd256 rmd320 wp256 wp384 tgr128 tgr160
compute_digest()
{
local algorithm="$1"
local file="$2"
local digest
digest="$(${algorithm}sum $file 2>/dev/null | cut -f1 -d ' ')"
if [ -n "$digest" ]; then
echo "$digest"
return 0
fi
digest="$(openssl $algorithm $file 2>/dev/null | cut -f2 -d ' ')"
if [ -n "$digest" ]; then
echo "$digest"
return 0
fi
# uncommon ciphers
local arg="$algorithm"
case "$algorithm" in
tgr192) arg="tiger" ;;
wp512) arg="whirlpool" ;;
esac
digest="$(rdigest --$arg $file 2>/dev/null | cut -f1 -d ' ')"
if [ -n "$digest" ]; then
echo "$digest"
return 0
fi
return 1
}
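# Illustrative usage (the file path is hypothetical):
#   digest="$(compute_digest sha256 $TST_TMPDIR/datafile)" || \
#           tst_brk TCONF "cannot compute sha256 digest"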
check_policy_readable()
{
if [ ! -f $IMA_POLICY ]; then
tst_res TINFO "missing $IMA_POLICY (reboot or CONFIG_IMA_WRITE_POLICY=y required)"
return 1
fi
cat $IMA_POLICY > /dev/null 2>/dev/null
}
require_policy_readable()
{
if [ ! -f $IMA_POLICY ]; then
tst_brk TCONF "missing $IMA_POLICY (reboot or CONFIG_IMA_WRITE_POLICY=y required)"
fi
if ! check_policy_readable; then
tst_brk TCONF "cannot read IMA policy (CONFIG_IMA_READ_POLICY=y required)"
fi
}
require_policy_writable()
{
local err="IMA policy already loaded and kernel not configured to enable multiple writes to it (need CONFIG_IMA_WRITE_POLICY=y)"
[ -f $IMA_POLICY ] || tst_brk TCONF "$err"
# CONFIG_IMA_READ_POLICY
echo "" 2> log > $IMA_POLICY
grep -q "Device or resource busy" log && tst_brk TCONF "$err"
}
check_ima_policy_content()
{
local pattern="$1"
local grep_params="${2--q}"
check_policy_readable || return 1
grep $grep_params "$pattern" $IMA_POLICY
}
require_ima_policy_content()
{
local pattern="$1"
local grep_params="${2--q}"
require_policy_readable
if ! grep $grep_params "$pattern" $IMA_POLICY; then
tst_brk TCONF "IMA policy does not specify '$pattern'"
fi
}
check_ima_policy_cmdline()
{
local policy="$1"
local i
grep -q "ima_$policy" /proc/cmdline && return
for i in $(cat /proc/cmdline); do
if echo "$i" | grep -q '^ima_policy='; then
echo "$i" | grep -q -e "|[ ]*$policy" -e "$policy[ ]*|" -e "=$policy" && return 0
fi
done
return 1
}
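# Illustrative (hypothetical cmdlines): "check_ima_policy_cmdline tcb" succeeds
# for "ima_tcb", "ima_policy=tcb" or "ima_policy=appraise_tcb|tcb", and fails
# when no ima_* flag or ima_policy= entry mentions "tcb".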
require_ima_policy_cmdline()
{
local policy="$1"
check_ima_policy_cmdline $policy || \
tst_brk TCONF "IMA measurement tests require builtin IMA $policy policy (e.g. ima_policy=$policy kernel parameter)"
}
mount_helper()
{
local type="$1"
local default_dir="$2"
local dir
dir="$(grep ^$type /proc/mounts | cut -d ' ' -f2 | head -1)"
[ -n "$dir" ] && { echo "$dir"; return; }
if ! mkdir -p $default_dir; then
tst_brk TBROK "failed to create $default_dir"
fi
if ! mount -t $type $type $default_dir; then
tst_brk TBROK "failed to mount $type"
fi
UMOUNT="$default_dir $UMOUNT"
echo $default_dir
}
mount_loop_device()
{
local ret
tst_mkfs
tst_mount
cd $TST_MNTPOINT
}
print_ima_config()
{
local config="${KCONFIG_PATH:-/boot/config-$(uname -r)}"
local i
if [ -r "$config" ]; then
tst_res TINFO "IMA kernel config:"
for i in $(grep ^CONFIG_IMA $config); do
tst_res TINFO "$i"
done
fi
tst_res TINFO "/proc/cmdline: $(cat /proc/cmdline)"
}
ima_setup()
{
SECURITYFS="$(mount_helper securityfs $SYSFS/kernel/security)"
IMA_DIR="$SECURITYFS/ima"
[ -d "$IMA_DIR" ] || tst_brk TCONF "IMA not enabled in kernel"
ASCII_MEASUREMENTS="$IMA_DIR/ascii_runtime_measurements"
BINARY_MEASUREMENTS="$IMA_DIR/binary_runtime_measurements"
IMA_POLICY="$IMA_DIR/policy"
# hack to support running tests locally from ima/tests directory
if [ ! -d "$TST_DATAROOT" ]; then
TST_DATAROOT="$LTPROOT/../datafiles/$TST_ID/"
fi
print_ima_config
if [ "$TST_NEEDS_DEVICE" = 1 ]; then
tst_res TINFO "\$TMPDIR is on tmpfs => run on loop device"
mount_loop_device
fi
[ -n "$TST_SETUP_CALLER" ] && $TST_SETUP_CALLER
}
ima_cleanup()
{
local dir
[ -n "$TST_CLEANUP_CALLER" ] && $TST_CLEANUP_CALLER
for dir in $UMOUNT; do
umount $dir
done
if [ "$TST_NEEDS_DEVICE" = 1 ]; then
cd $TST_TMPDIR
tst_umount
fi
}
set_digest_index()
{
DIGEST_INDEX=
local template="$(tail -1 $ASCII_MEASUREMENTS | cut -d' ' -f 3)"
local i word
# parse digest index
# https://www.kernel.org/doc/html/latest/security/IMA-templates.html#use
case "$template" in
ima|ima-ng|ima-sig) DIGEST_INDEX=4 ;;
*)
# using ima_template_fmt kernel parameter
local IFS="|"
i=4
for word in $template; do
if [ "$word" = 'd' -o "$word" = 'd-ng' ]; then
DIGEST_INDEX=$i
break
fi
i=$((i+1))
done
esac
[ -z "$DIGEST_INDEX" ] && tst_brk TCONF \
"Cannot find digest index (template: '$template')"
}
get_algorithm_digest()
{
local line="$1"
local delimiter=':'
local algorithm digest
if [ -z "$line" ]; then
echo "measurement record not found"
return 1
fi
[ -z "$DIGEST_INDEX" ] && set_digest_index
digest=$(echo "$line" | cut -d' ' -f $DIGEST_INDEX)
if [ -z "$digest" ]; then
echo "digest not found (index: $DIGEST_INDEX, line: '$line')"
return 1
fi
if [ "${digest#*$delimiter}" != "$digest" ]; then
algorithm=$(echo "$digest" | cut -d $delimiter -f 1)
digest=$(echo "$digest" | cut -d $delimiter -f 2)
else
case "${#digest}" in
32) algorithm="md5" ;;
40) algorithm="sha1" ;;
*)
echo "algorithm must be either md5 or sha1 (digest: '$digest')"
return 1 ;;
esac
fi
if [ -z "$algorithm" ]; then
echo "algorithm not found"
return 1
fi
if [ -z "$digest" ]; then
echo "digest not found"
return 1
fi
echo "$algorithm|$digest"
}
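# Illustrative: for a measurement line whose digest field reads "sha256:4e2f...",
# this prints "sha256|4e2f..."; a bare 40-character digest is reported as
# "sha1|<digest>", and a bare 32-character one as "md5|<digest>".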
ima_check()
{
local test_file="$1"
local algorithm digest expected_digest line tmp
# need to read file to get updated $ASCII_MEASUREMENTS
cat $test_file > /dev/null
line="$(grep $test_file $ASCII_MEASUREMENTS | tail -1)"
if tmp=$(get_algorithm_digest "$line"); then
algorithm=$(echo "$tmp" | cut -d'|' -f1)
digest=$(echo "$tmp" | cut -d'|' -f2)
else
tst_res TBROK "failed to get algorithm/digest for '$test_file': $tmp"
fi
tst_res TINFO "computing digest for $algorithm algorithm"
expected_digest="$(compute_digest $algorithm $test_file)" || \
tst_brk TCONF "cannot compute digest for $algorithm algorithm"
if [ "$digest" = "$expected_digest" ]; then
tst_res TPASS "correct digest found"
else
tst_res TFAIL "digest not found"
fi
}
# check_evmctl REQUIRED_TPM_VERSION
# return: 0: evmctl is new enough, 1: version older than required (or version < v0.9)
check_evmctl()
{
local required="$1"
local r1="$(echo $required | cut -d. -f1)"
local r2="$(echo $required | cut -d. -f2)"
local r3="$(echo $required | cut -d. -f3)"
[ -z "$r3" ] && r3=0
tst_is_int "$r1" || tst_brk TBROK "required major version not int ($v1)"
tst_is_int "$r2" || tst_brk TBROK "required minor version not int ($v2)"
tst_is_int "$r3" || tst_brk TBROK "required patch version not int ($v3)"
tst_check_cmds evmctl || return 1
local v="$(evmctl --version | cut -d' ' -f2)"
[ -z "$v" ] && return 1
tst_res TINFO "evmctl version: $v"
local v1="$(echo $v | cut -d. -f1)"
local v2="$(echo $v | cut -d. -f2)"
local v3="$(echo $v | cut -d. -f3)"
[ -z "$v3" ] && v3=0
if [ $v1 -lt $r1 ] || [ $v1 -eq $r1 -a $v2 -lt $r2 ] || \
[ $v1 -eq $r1 -a $v2 -eq $r2 -a $v3 -lt $r3 ]; then
return 1
fi
return 0
}
# require_evmctl REQUIRED_TPM_VERSION
require_evmctl()
{
local required="$1"
if ! check_evmctl $required; then
tst_brk TCONF "evmctl >= $required required"
fi
}
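# Illustrative usage: with evmctl 1.1 installed, "check_evmctl 1.0" returns 0,
# while "check_evmctl 1.3.2" returns 1, so "require_evmctl 1.3.2" would end the
# test with TCONF.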
# a loop device is needed only when TMPDIR is on tmpfs
TMPDIR="${TMPDIR:-/tmp}"
if [ "$(df -T $TMPDIR | tail -1 | awk '{print $2}')" != "tmpfs" -a -n "$TST_NEEDS_DEVICE" ]; then
unset TST_NEEDS_DEVICE
fi
|
linux-test-project/ltp
|
testcases/kernel/security/integrity/ima/tests/ima_setup.sh
|
Shell
|
gpl-2.0
| 7,999 |
#!/bin/sh
# This shell-script will import some GNS authorities into your GNS
# namestore.
gnunet-namestore -a -e never -n fcfs -p -t PKEY -V 72QC35CO20UJN1E91KPJFNT9TG4CLKAPB4VK9S3Q758S9MLBRKOG
|
h4ck3rm1k3/gnunet-debian
|
contrib/gnunet-gns-import.sh
|
Shell
|
gpl-3.0
| 194 |
#!/usr/bin/env zsh
# author: Filipe Silva (ninrod)
SCRIPTPATH=$(cd $(dirname $0); pwd -P) && cd $SCRIPTPATH
GIT_ROOT=$(git rev-parse --show-toplevel)
DIR=~/.config/ranger
TARGET=.
if [[ -d $DIR ]]; then
rm -rf $DIR
else
rm -f $DIR
fi
ln -s $(readlink -f $TARGET) $DIR
|
ninrod/dotfiles
|
options/ubuntu/ranger/makelink.zsh
|
Shell
|
gpl-3.0
| 276 |
#!/bin/sh
echo -n 'Preparing files...'
cd ..
rm -f tanglet.desktop.in
cp tanglet.desktop tanglet.desktop.in
sed -e '/^Name\[/ d' \
-e '/^GenericName\[/ d' \
-e '/^Comment\[/ d' \
-e '/^Icon/ d' \
-e '/^Keywords/ d' \
-i tanglet.desktop.in
rm -f tanglet.appdata.xml.in
cp tanglet.appdata.xml tanglet.appdata.xml.in
sed -e '/p xml:lang/ d' \
-e '/summary xml:lang/ d' \
-e '/name xml:lang/ d' \
-e '/<developer_name>/ d' \
-i tanglet.appdata.xml.in
rm -f tanglet.xml.in.h
rm -f tanglet.xml.in
cp tanglet.xml tanglet.xml.in
sed -e '/comment xml:lang/ d' \
-e 's/<comment>/<_comment>/' \
-e 's/<\/comment>/<\/_comment>/' \
-i tanglet.xml.in
intltool-extract --quiet --type=gettext/xml tanglet.xml.in
rm -f tanglet.xml.in
cd po
echo ' DONE'
echo -n 'Extracting messages...'
xgettext --from-code=UTF-8 --output=description.pot \
--package-name='Tanglet' --copyright-holder='Graeme Gott' \
../*.in
xgettext --from-code=UTF-8 -j --keyword=N_:1 --output=description.pot \
--package-name='Tanglet' --copyright-holder='Graeme Gott' \
../*.h
sed 's/CHARSET/UTF-8/' -i description.pot
echo ' DONE'
echo -n 'Cleaning up...'
cd ..
rm -f tanglet.desktop.in
rm -f tanglet.appdata.xml.in
rm -f tanglet.xml.in.h
echo ' DONE'
|
enz/tanglet
|
icons/po/update-pot.sh
|
Shell
|
gpl-3.0
| 1,234 |
#!/bin/sh
#
# srecord - Manipulate EPROM load files
# Copyright (C) 2011 Peter Miller
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
echo "<html>"
echo "<body>"
echo '<h1 align="center">'
echo '<img src="../srecord-64.png" align="left" />'
echo '<img src="../srecord-64.png" align="right" />'
echo "SRecord Manual Pages </h1>"
oldsection=1
echo '<br clear="all" />'
echo "<h2>Commands</h2>"
echo "<ul>"
for f in "$@"
do
base=$(basename $f .html)
section=$(echo $f | sed 's|.*/man\([0-9]\)/.*|\1|' )
if [ "$section" != "$oldsection" ]
then
echo "</ul>"
case $section in
1)
echo "<h2>Commands</h2>"
;;
3)
echo "<h2>Library</h2>"
;;
5)
echo "<h2>File Formats</h2>"
;;
*)
echo "<h2>Section $section</h2>"
;;
esac
echo "<ul>"
oldsection=$section
fi
echo "<li><a href=\"man$section/$base.html\" ><i>$base</i>($section)</a>"
done
echo "</ul>"
echo "</body>"
echo "</html>"
exit 0
# vim: set ts=8 sw=4 et :
|
freyc/SRecord
|
etc/man-html-index.sh
|
Shell
|
gpl-3.0
| 1,681 |
#!/bin/bash
#-----------------------------------------------------------------
# Runs serial version of merge throught sbatch on TACC
#
# This script requests one core (out of 16) on one node. The job
# will have access to all the memory in the node. Note that this
# job will be charged as if all 16 cores were requested.
#-----------------------------------------------------------------
#SBATCH -J mergeThreads3 # Job name
#SBATCH -o mergeThreads3.%j.out # Specify stdout output file (%j expands to jobId)
#SBATCH -p flat-quadrant # Queue name
#SBATCH -N 1 # Total number of nodes requested (16 cores/node)
#SBATCH -n 1 # Total number of tasks
#SBATCH -t 8:00:00 # Run time (hh:mm:ss) - 8 hours
#SBATCH [email protected]
#SBATCH --mail-type=begin # email me when the job starts
#SBATCH --mail-type=end # email me when the job finishes
# Launch merge of different sizes.
#./merge -A 1 -B 1
#./merge -A 1 -B 1
#./merge -A 1 -B 1
#./merge -A 5 -B 5
#./merge -A 5 -B 5
#./merge -A 5 -B 5
#./merge -A 20 -B 20
#./merge -A 20 -B 20
#./merge -A 30 -B 30
#./merge -A 32 -B 32
#./merge -A 40 -B 40
#./merge -A 50 -B 50
#./merge -A 100 -B 100
#./merge -A 100 -B 100
#./merge -A 128 -B 128
#./merge -A 128 -B 128
#./merge -A 200 -B 200
#./merge -A 500 -B 500
numactl --membind=1 ./merge -F
|
ogreen/avx-sort
|
src/sbatchrun.sh
|
Shell
|
gpl-3.0
| 1,389 |
if [[ -n "${ANSIBLE_VAULT_PASSWORD:-}" ]]; then
export ANSIBLE_VAULT_PASSWORD_FILE=$(mktemp)
echo "${ANSIBLE_VAULT_PASSWORD}" > "${ANSIBLE_VAULT_PASSWORD_FILE}"
else
>&2 echo "ERROR: ANSIBLE_VAULT_PASSWORD unset or empty"
exit 1
fi
|
wtsi-hgi/hgi-ansible
|
ci/before_scripts.d/15-ansible-vault-pw.sh
|
Shell
|
gpl-3.0
| 248 |
#!/bin/bash
#
# Script will be run after parameterization has completed, e.g.,
# use this to compile source code that has been parameterized.
#
# avoid error message in syslog
echo $1 | sudo -S touch /dev/xconsole
echo $1 | sudo -S chown syslog:adm /dev/xconsole
|
cliffe/SecGen
|
modules/utilities/unix/labtainers/files/Labtainers-master/labs/sys-log/sys-log/_bin/fixlocal.sh
|
Shell
|
gpl-3.0
| 266 |
#!/bin/bash
if [ -z "$1" ] ; then
cat << EOF
$0: Install the needed packages.
-------------------------------------
This script installs:
* Some basic packages like vim, less, bash-completion
* Working packages like postgresql lighttpd default-jre-headless
* Utils packages like zip unzip git-core rsync
EOF
elif [ "$1" = exec ] ; then
sudo apt-get install vim less git-core bash-completion
sudo apt-get install postgresql lighttpd openssh-server default-jre-headless rsync
# Utils
sudo apt-get install zip unzip
else
echo "Parameter must be empty or 'exec'"
fi
|
BloatIt/bloatit
|
deployment/install/040-installPackages.sh
|
Shell
|
agpl-3.0
| 598 |
ghc -O2 main.hs -o ../aheui
|
xnuk/Comparison-of-aheui-implementations
|
test/xnuk.haskell-aheui.sh
|
Shell
|
unlicense
| 28 |
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(jbeda): Provide a way to override project
# gcloud multiplexing for shared GCE/GKE tests.
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/cluster/gce/config-common.sh"
# Specifying KUBE_GCE_API_ENDPOINT will override the default GCE Compute API endpoint (https://www.googleapis.com/compute/v1/).
# This endpoint has to be pointing to v1 api. For example, https://www.googleapis.com/compute/staging_v1/
export GCE_API_ENDPOINT=${KUBE_GCE_API_ENDPOINT:-}
ZONE=${KUBE_GCE_ZONE:-us-central1-b}
export REGION=${ZONE%-*}
RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false}
REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true}
# TODO: Migrate to e2-standard machine family.
NODE_SIZE=${NODE_SIZE:-n1-standard-2}
NUM_NODES=${NUM_NODES:-3}
NUM_WINDOWS_NODES=${NUM_WINDOWS_NODES:-0}
# TODO: Migrate to e2-standard machine family.
MASTER_SIZE=${MASTER_SIZE:-n1-standard-$(get-master-size)}
MASTER_MIN_CPU_ARCHITECTURE=${MASTER_MIN_CPU_ARCHITECTURE:-} # To allow choosing better architectures.
export MASTER_DISK_TYPE=pd-ssd
MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)}
MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}
NODE_LABELS=${KUBE_NODE_LABELS:-}
WINDOWS_NODE_LABELS=${WINDOWS_NODE_LABELS:-}
NODE_LOCAL_SSDS_EPHEMERAL=${NODE_LOCAL_SSDS_EPHEMERAL:-}
# KUBE_CREATE_NODES can be used to avoid creating nodes, while master will be sized for NUM_NODES nodes.
# Firewalls and node templates are still created.
KUBE_CREATE_NODES=${KUBE_CREATE_NODES:-true}
# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices
# Format of this variable will be "#,scsi/nvme,block/fs" you can specify multiple
# configurations by separating them by a semi-colon ex. "2,scsi,fs;1,nvme,block"
# is a request for 2 SCSI formatted and mounted SSDs and 1 NVMe block device SSD.
NODE_LOCAL_SSDS_EXT=${NODE_LOCAL_SSDS_EXT:-}
NODE_ACCELERATORS=${NODE_ACCELERATORS:-''}
export REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
export KUBE_APISERVER_REQUEST_TIMEOUT=300
# Increase initial delay for the apiserver liveness probe, to avoid prematurely tearing it down
KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC=${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-45}
# Also increase the initial delay for etcd just to be safe
ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC=${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-45}
PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true}
CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MIG_WAIT_UNTIL_STABLE_TIMEOUT=${MIG_WAIT_UNTIL_STABLE_TIMEOUT:-1800}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
WINDOWS_NODE_OS_DISTRIBUTION=${WINDOWS_NODE_OS_DISTRIBUTION:-win2019}
if [[ "${MASTER_OS_DISTRIBUTION}" = 'cos' ]]; then
MASTER_OS_DISTRIBUTION='gci'
fi
if [[ "${NODE_OS_DISTRIBUTION}" = 'cos' ]]; then
NODE_OS_DISTRIBUTION='gci'
fi
# GPUs supported in GCE do not have compatible drivers in Debian 7.
if [[ "${NODE_OS_DISTRIBUTION}" = 'debian' ]]; then
NODE_ACCELERATORS=''
fi
# To avoid failing large tests due to some flakes in starting nodes, allow
# for a small percentage of nodes to not start during cluster startup.
ALLOWED_NOTREADY_NODES=${ALLOWED_NOTREADY_NODES:-$(($(get-num-nodes) / 100))}
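# For example, a 500-node cluster tolerates up to 5 not-ready nodes by default,
# while clusters under 100 nodes tolerate none (integer division).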
# By default a cluster will be started with the master and nodes
# on Container-optimized OS (cos, previously known as gci). If
# you are updating the os image versions, update this variable.
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
GCI_VERSION=${KUBE_GCI_VERSION:-cos-85-13310-1041-9}
export MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
export MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
export NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
export NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud}
export NODE_SERVICE_ACCOUNT=${KUBE_GCE_NODE_SERVICE_ACCOUNT:-default}
# Default container runtime for linux
export CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-containerd}
# Default container runtime for windows
export WINDOWS_CONTAINER_RUNTIME=${KUBE_WINDOWS_CONTAINER_RUNTIME:-docker}
# Set default values with override
if [[ "${CONTAINER_RUNTIME}" == "docker" ]]; then
export CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}
export CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-docker}
export LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-}
elif [[ "${CONTAINER_RUNTIME}" == "containerd" || "${CONTAINER_RUNTIME}" == "remote" ]]; then
export CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-unix:///run/containerd/containerd.sock}
export CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-containerd}
export LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-ctr -n=k8s.io images import}
export LOG_DUMP_SYSTEMD_SERVICES=${LOG_DUMP_SYSTEMD_SERVICES:-containerd}
export CONTAINER_RUNTIME_TEST_HANDLER="true"
fi
export GCI_DOCKER_VERSION=${KUBE_GCI_DOCKER_VERSION:-}
# Ability to inject custom versions (Ubuntu OS images ONLY)
# if KUBE_UBUNTU_INSTALL_CONTAINERD_VERSION or KUBE_UBUNTU_INSTALL_RUNC_VERSION
# is set to empty then we do not override the version(s) and just
# use whatever is in the default installation of containerd package
export UBUNTU_INSTALL_CONTAINERD_VERSION=${KUBE_UBUNTU_INSTALL_CONTAINERD_VERSION:-}
export UBUNTU_INSTALL_RUNC_VERSION=${KUBE_UBUNTU_INSTALL_RUNC_VERSION:-}
# MASTER_EXTRA_METADATA is the extra instance metadata on master instance separated by commas.
export MASTER_EXTRA_METADATA=${KUBE_MASTER_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
# NODE_EXTRA_METADATA is the extra instance metadata on node instance separated by commas.
export NODE_EXTRA_METADATA=${KUBE_NODE_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
NETWORK=${KUBE_GCE_NETWORK:-e2e-test-${USER}}
if [[ "${CREATE_CUSTOM_NETWORK}" = true ]]; then
SUBNETWORK=${SUBNETWORK:-${NETWORK}-custom-subnet}
fi
INSTANCE_PREFIX=${KUBE_GCE_INSTANCE_PREFIX:-e2e-test-${USER}}
CLUSTER_NAME=${CLUSTER_NAME:-${INSTANCE_PREFIX}}
MASTER_NAME="${INSTANCE_PREFIX}-master"
export AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
export INITIAL_ETCD_CLUSTER=${MASTER_NAME}
export MASTER_TAG="${INSTANCE_PREFIX}-master"
export NODE_TAG="${INSTANCE_PREFIX}-minion"
CLUSTER_IP_RANGE=${CLUSTER_IP_RANGE:-$(get-cluster-ip-range)}
MASTER_IP_RANGE=${MASTER_IP_RANGE:-10.246.0.0/24}
# NODE_IP_RANGE is used when ENABLE_IP_ALIASES=true or CREATE_CUSTOM_NETWORK=true.
# It is the primary range in the subnet and is the range used for node instance IPs.
NODE_IP_RANGE=$(get-node-ip-range)
export NODE_IP_RANGE
export RUNTIME_CONFIG=${KUBE_RUNTIME_CONFIG:-}
if [[ "${KUBE_FEATURE_GATES:-}" = 'AllAlpha=true' ]]; then
RUNTIME_CONFIG=${KUBE_RUNTIME_CONFIG:-api/all=true}
fi
# If feature gates includes AllAlpha or EndpointSlice, and EndpointSlice has not been disabled, add EndpointSlice controller to list of controllers to run.
if [[ (( "${KUBE_FEATURE_GATES:-}" = *"AllAlpha=true"* ) || ( "${KUBE_FEATURE_GATES:-}" = *"EndpointSlice=true"* )) && "${KUBE_FEATURE_GATES:-}" != *"EndpointSlice=false"* ]]; then
RUN_CONTROLLERS=${RUN_CONTROLLERS:-*,endpointslice}
fi
# Optional: set feature gates
FEATURE_GATES=${KUBE_FEATURE_GATES:-}
TERMINATED_POD_GC_THRESHOLD=${TERMINATED_POD_GC_THRESHOLD:-100}
# Extra docker options for nodes.
EXTRA_DOCKER_OPTS=${EXTRA_DOCKER_OPTS:-}
# Enable the docker debug mode.
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --debug"
export SERVICE_CLUSTER_IP_RANGE='10.0.0.0/16' # formerly PORTAL_NET
# When set to true, Docker Cache is enabled by default as part of the cluster bring up.
export ENABLE_DOCKER_REGISTRY_CACHE=true
# Optional: Deploy a L7 loadbalancer controller to fulfill Ingress requests:
# glbc - GCE L7 Load Balancer Controller
export ENABLE_L7_LOADBALANCING=${KUBE_ENABLE_L7_LOADBALANCING:-glbc}
# Optional: Enable Metrics Server. Metrics Server should be enable everywhere,
# since it's a critical component, but in the first release we need a way to disable
# this in case of stability issues.
# TODO(piosz) remove this option once Metrics Server became a stable thing.
export ENABLE_METRICS_SERVER=${KUBE_ENABLE_METRICS_SERVER:-true}
# Optional: Metadata agent to setup as part of the cluster bring up:
# none - No metadata agent
# stackdriver - Stackdriver metadata agent
# Metadata agent is a daemon set that provides metadata of kubernetes objects
# running on the same node for exporting metrics and logs.
export ENABLE_METADATA_AGENT=${KUBE_ENABLE_METADATA_AGENT:-none}
# One special node out of NUM_NODES would be created of this type if specified.
# Useful for scheduling heapster in large clusters with nodes of small size.
HEAPSTER_MACHINE_TYPE=${HEAPSTER_MACHINE_TYPE:-}
# Optional: Additional nodes would be created if their type and number is specified.
# NUM_NODES would be lowered respectively.
# Useful for running cluster-level addons that needs more resources than would fit
# on small nodes, like network plugins.
NUM_ADDITIONAL_NODES=${NUM_ADDITIONAL_NODES:-}
ADDITIONAL_MACHINE_TYPE=${ADDITIONAL_MACHINE_TYPE:-}
# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.4.13-0) if you need
# non-default version.
export ETCD_IMAGE=${TEST_ETCD_IMAGE:-}
export ETCD_DOCKER_REPOSITORY=${TEST_ETCD_DOCKER_REPOSITORY:-}
export ETCD_VERSION=${TEST_ETCD_VERSION:-}
# Default Log level for all components in test clusters and variables to override it in specific components.
TEST_CLUSTER_LOG_LEVEL=${TEST_CLUSTER_LOG_LEVEL:---v=4}
KUBELET_TEST_LOG_LEVEL=${KUBELET_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}
DOCKER_TEST_LOG_LEVEL=${DOCKER_TEST_LOG_LEVEL:---log-level=info}
API_SERVER_TEST_LOG_LEVEL=${API_SERVER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}
CONTROLLER_MANAGER_TEST_LOG_LEVEL=${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}
SCHEDULER_TEST_LOG_LEVEL=${SCHEDULER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}
KUBEPROXY_TEST_LOG_LEVEL=${KUBEPROXY_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}
VOLUME_PLUGIN_DIR=${VOLUME_PLUGIN_DIR:-/home/kubernetes/flexvolume}
TEST_CLUSTER_DELETE_COLLECTION_WORKERS=${TEST_CLUSTER_DELETE_COLLECTION_WORKERS:---delete-collection-workers=1}
TEST_CLUSTER_MAX_REQUESTS_INFLIGHT=${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT:-}
TEST_CLUSTER_RESYNC_PERIOD=${TEST_CLUSTER_RESYNC_PERIOD:---min-resync-period=3m}
# ContentType used by all components to communicate with apiserver.
TEST_CLUSTER_API_CONTENT_TYPE=${TEST_CLUSTER_API_CONTENT_TYPE:-}
KUBELET_TEST_ARGS="${KUBELET_TEST_ARGS:-} --serialize-image-pulls=false ${TEST_CLUSTER_API_CONTENT_TYPE}"
if [[ "${NODE_OS_DISTRIBUTION}" = 'gci' ]] || [[ "${NODE_OS_DISTRIBUTION}" = 'ubuntu' ]] || [[ "${NODE_OS_DISTRIBUTION}" = 'custom' ]]; then
NODE_KUBELET_TEST_ARGS="${NODE_KUBELET_TEST_ARGS:-} --kernel-memcg-notification=true"
fi
if [[ "${MASTER_OS_DISTRIBUTION}" = 'gci' ]] || [[ "${MASTER_OS_DISTRIBUTION}" = 'ubuntu' ]]; then
MASTER_KUBELET_TEST_ARGS="${MASTER_KUBELET_TEST_ARGS:-} --kernel-memcg-notification=true"
fi
APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-} --runtime-config=extensions/v1beta1,scheduling.k8s.io/v1alpha1 ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT}"
CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE}"
SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
export MASTER_NODE_LABELS=${KUBE_MASTER_NODE_LABELS:-}
# NON_MASTER_NODE_LABELS are labels will only be applied on non-master nodes.
NON_MASTER_NODE_LABELS=${KUBE_NON_MASTER_NODE_LABELS:-}
WINDOWS_NON_MASTER_NODE_LABELS=${WINDOWS_NON_MASTER_NODE_LABELS:-}
if [[ "${PREEMPTIBLE_MASTER}" = 'true' ]]; then
NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-preemptible=true"
WINDOWS_NODE_LABELS="${WINDOWS_NODE_LABELS},cloud.google.com/gke-preemptible=true"
elif [[ "${PREEMPTIBLE_NODE}" = 'true' ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-preemptible=true"
WINDOWS_NON_MASTER_NODE_LABELS="${WINDOWS_NON_MASTER_NODE_LABELS},cloud.google.com/gke-preemptible=true"
fi
# Optional: Enable netd.
ENABLE_NETD=${KUBE_ENABLE_NETD:-false}
export CUSTOM_NETD_YAML=${KUBE_CUSTOM_NETD_YAML:-}
export CUSTOM_CALICO_NODE_DAEMONSET_YAML=${KUBE_CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}
export CUSTOM_TYPHA_DEPLOYMENT_YAML=${KUBE_CUSTOM_TYPHA_DEPLOYMENT_YAML:-}
# To avoid running netd on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
# Windows nodes do not support netd.
if [[ ${ENABLE_NETD:-} = 'true' ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}cloud.google.com/gke-netd-ready=true"
fi
export ENABLE_NODELOCAL_DNS=${KUBE_ENABLE_NODELOCAL_DNS:-false}
# To avoid running Calico on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
# Windows nodes do not support Calico.
if [[ ${NETWORK_POLICY_PROVIDER:-} = 'calico' ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}projectcalico.org/ds-ready=true"
fi
# Enable metadata concealment by firewalling pod traffic to the metadata server
# and run a proxy daemonset on nodes.
ENABLE_METADATA_CONCEALMENT=${ENABLE_METADATA_CONCEALMENT:-true} # true, false
METADATA_CONCEALMENT_NO_FIREWALL=${METADATA_CONCEALMENT_NO_FIREWALL:-false} # true, false
if [[ ${ENABLE_METADATA_CONCEALMENT:-} = 'true' ]]; then
# Put the necessary label on the node so the daemonset gets scheduled.
NODE_LABELS="${NODE_LABELS},cloud.google.com/metadata-proxy-ready=true"
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT METADATA_CONCEALMENT_NO_FIREWALL"
fi
# Optional: Enable node logging.
export ENABLE_NODE_LOGGING=${KUBE_ENABLE_NODE_LOGGING:-true}
export LOGGING_DESTINATION=${KUBE_LOGGING_DESTINATION:-gcp} # options: elasticsearch, gcp
# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
export ENABLE_CLUSTER_LOGGING=${KUBE_ENABLE_CLUSTER_LOGGING:-true}
export ELASTICSEARCH_LOGGING_REPLICAS=1
# Optional: Don't require https for registries in our local RFC1918 network
if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} = 'true' ]]; then
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --insecure-registry 10.0.0.0/8"
fi
if [[ -n "${NODE_ACCELERATORS}" ]]; then
if [[ -z "${FEATURE_GATES:-}" ]]; then
FEATURE_GATES='DevicePlugins=true'
else
FEATURE_GATES="${FEATURE_GATES},DevicePlugins=true"
fi
if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
fi
fi
# Optional: Install cluster DNS.
# Set CLUSTER_DNS_CORE_DNS to 'false' to install kube-dns instead of CoreDNS.
CLUSTER_DNS_CORE_DNS=${CLUSTER_DNS_CORE_DNS:-true}
export ENABLE_CLUSTER_DNS=${KUBE_ENABLE_CLUSTER_DNS:-true}
export DNS_SERVER_IP='10.0.0.10'
export LOCAL_DNS_IP=${KUBE_LOCAL_DNS_IP:-169.254.20.10}
export DNS_DOMAIN='cluster.local'
export DNS_MEMORY_LIMIT=${KUBE_DNS_MEMORY_LIMIT:-170Mi}
# Optional: Enable DNS horizontal autoscaler
export ENABLE_DNS_HORIZONTAL_AUTOSCALER=${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}
# Optional: Install Kubernetes UI
export ENABLE_CLUSTER_UI=${KUBE_ENABLE_CLUSTER_UI:-true}
# Optional: Install node problem detector.
# none - Do not run node problem detector.
# daemonset - Run node problem detector as daemonset.
# standalone - Run node problem detector as standalone system daemon.
if [[ "${NODE_OS_DISTRIBUTION}" = 'gci' ]]; then
# Enable standalone mode by default for gci.
ENABLE_NODE_PROBLEM_DETECTOR=${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-standalone}
else
ENABLE_NODE_PROBLEM_DETECTOR=${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset}
fi
export ENABLE_NODE_PROBLEM_DETECTOR
NODE_PROBLEM_DETECTOR_VERSION=${NODE_PROBLEM_DETECTOR_VERSION:-}
NODE_PROBLEM_DETECTOR_TAR_HASH=${NODE_PROBLEM_DETECTOR_TAR_HASH:-}
NODE_PROBLEM_DETECTOR_RELEASE_PATH=${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-}
NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS=${NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}
CNI_HASH=${CNI_HASH:-}
CNI_TAR_PREFIX=${CNI_TAR_PREFIX:-cni-plugins-linux-amd64-}
CNI_STORAGE_URL_BASE=${CNI_STORAGE_URL_BASE:-https://storage.googleapis.com/k8s-artifacts-cni/release}
# Optional: Create autoscaler for cluster's nodes.
export ENABLE_CLUSTER_AUTOSCALER=${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" = 'true' ]]; then
export AUTOSCALER_MIN_NODES=${KUBE_AUTOSCALER_MIN_NODES:-}
export AUTOSCALER_MAX_NODES=${KUBE_AUTOSCALER_MAX_NODES:-}
export AUTOSCALER_ENABLE_SCALE_DOWN=${KUBE_AUTOSCALER_ENABLE_SCALE_DOWN:-false}
export AUTOSCALER_EXPANDER_CONFIG=${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}
fi
# Optional: Enable allocation of pod IPs using IP aliases.
#
# BETA FEATURE.
#
# IP_ALIAS_SIZE is the size of the podCIDR allocated to a node.
# IP_ALIAS_SUBNETWORK is the subnetwork to allocate from. If empty, a
# new subnetwork will be created for the cluster.
ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false}
export NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-RangeAllocator}
if [ "${ENABLE_IP_ALIASES}" = true ]; then
# Number of Pods that can run on this node.
MAX_PODS_PER_NODE=${MAX_PODS_PER_NODE:-110}
# Size of ranges allocated to each node.
IP_ALIAS_SIZE="/$(get-alias-range-size "${MAX_PODS_PER_NODE}")"
IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
# If we're using custom network, use the subnet we already create for it as the one for ip-alias.
# Note that this means SUBNETWORK would override KUBE_GCE_IP_ALIAS_SUBNETWORK in case of custom network.
if [[ "${CREATE_CUSTOM_NETWORK}" = true ]]; then
IP_ALIAS_SUBNETWORK=${SUBNETWORK}
fi
export IP_ALIAS_SIZE
export IP_ALIAS_SUBNETWORK
# Reserve the services IP space to avoid being allocated for other GCP resources.
export SERVICE_CLUSTER_IP_SUBNETWORK=${KUBE_GCE_SERVICE_CLUSTER_IP_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-services}
NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-CloudAllocator}
SECONDARY_RANGE_NAME=${SECONDARY_RANGE_NAME:-}
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_IP_ALIASES"
PROVIDER_VARS="${PROVIDER_VARS:-} NODE_IPAM_MODE"
PROVIDER_VARS="${PROVIDER_VARS:-} SECONDARY_RANGE_NAME"
else
if [[ -n "${MAX_PODS_PER_NODE:-}" ]]; then
# Should not have MAX_PODS_PER_NODE set for route-based clusters.
echo -e "${color_red:-}Cannot set MAX_PODS_PER_NODE for route-based projects for ${PROJECT}." >&2
exit 1
fi
if [[ "$(get-num-nodes)" -gt 100 ]]; then
echo -e "${color_red:-}Cannot create cluster with more than 100 nodes for route-based projects for ${PROJECT}." >&2
exit 1
fi
fi
# Enable GCE Alpha features.
if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} GCE_ALPHA_FEATURES"
fi
# Disable Docker live-restore.
if [[ -n "${DISABLE_DOCKER_LIVE_RESTORE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} DISABLE_DOCKER_LIVE_RESTORE"
fi
# Override default GLBC image
if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} GCE_GLBC_IMAGE"
fi
CUSTOM_INGRESS_YAML=${CUSTOM_INGRESS_YAML:-}
if [[ -z "${KUBE_ADMISSION_CONTROL:-}" ]]; then
ADMISSION_CONTROL='NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection,PersistentVolumeClaimResize,RuntimeClass'
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" = 'true' ]]; then
ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
fi
# ResourceQuota must come last, or a creation is recorded, but the pod may be forbidden.
ADMISSION_CONTROL="${ADMISSION_CONTROL},MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
else
ADMISSION_CONTROL=${KUBE_ADMISSION_CONTROL}
fi
ENABLE_APISERVER_DYNAMIC_AUDIT=${ENABLE_APISERVER_DYNAMIC_AUDIT:-false}
# Optional: if set to true kube-up will automatically check for existing resources and clean them up.
KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
# Optional: setting it to true denotes this is a testing cluster,
# so that we can use pulled kubernetes binaries, even if binaries
# are pre-installed in the image. Note that currently this logic
# is only supported in trusty or GCI.
TEST_CLUSTER=${TEST_CLUSTER:-true}
# Storage backend. 'etcd2' and 'etcd3' are supported.
STORAGE_BACKEND=${STORAGE_BACKEND:-}
# Storage media type: application/json and application/vnd.kubernetes.protobuf are supported.
STORAGE_MEDIA_TYPE=${STORAGE_MEDIA_TYPE:-}
NETWORK_PROVIDER=${NETWORK_PROVIDER:-kubenet} # none, kubenet
# Network Policy plugin specific settings.
NETWORK_POLICY_PROVIDER=${NETWORK_POLICY_PROVIDER:-none} # calico
export NON_MASQUERADE_CIDR='0.0.0.0/0'
# How should the kubelet configure hairpin mode?
HAIRPIN_MODE=${HAIRPIN_MODE:-hairpin-veth} # promiscuous-bridge, hairpin-veth, none
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
export E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
# Evict pods whenever compute resource availability on the nodes gets below a threshold.
EVICTION_HARD=${EVICTION_HARD:-memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%}
# Optional: custom scheduling algorithm
SCHEDULING_ALGORITHM_PROVIDER=${SCHEDULING_ALGORITHM_PROVIDER:-}
# Optional: install a default StorageClass
ENABLE_DEFAULT_STORAGE_CLASS=${ENABLE_DEFAULT_STORAGE_CLASS:-true}
# Optional: install volume snapshot CRDs
ENABLE_VOLUME_SNAPSHOTS=${ENABLE_VOLUME_SNAPSHOTS:-true}
# Optional: Enable legacy ABAC policy that makes all service accounts superusers.
# Disabling this by default in tests ensures default RBAC policies are sufficient from 1.6+
# Upgrade test jobs that go from a version < 1.6 to a version >= 1.6 should override this to be true.
ENABLE_LEGACY_ABAC=${ENABLE_LEGACY_ABAC:-false} # true, false
# Enable a simple "AdvancedAuditing" setup for testing.
ENABLE_APISERVER_ADVANCED_AUDIT=${ENABLE_APISERVER_ADVANCED_AUDIT:-true} # true, false
ADVANCED_AUDIT_LOG_MODE=${ADVANCED_AUDIT_LOG_MODE:-batch} # batch, blocking
ENABLE_BIG_CLUSTER_SUBNETS=${ENABLE_BIG_CLUSTER_SUBNETS:-false}
# Optional: Enable log rotation for k8s services
ENABLE_LOGROTATE_FILES="${ENABLE_LOGROTATE_FILES:-true}"
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_LOGROTATE_FILES"
if [[ -n "${LOGROTATE_FILES_MAX_COUNT:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_FILES_MAX_COUNT"
fi
if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_MAX_SIZE"
fi
# Optional: Enable log rotation for pod logs
ENABLE_POD_LOG="${ENABLE_POD_LOG:-false}"
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_POD_LOG"
if [[ -n "${POD_LOG_MAX_FILE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} POD_LOG_MAX_FILE"
fi
if [[ -n "${POD_LOG_MAX_SIZE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} POD_LOG_MAX_SIZE"
fi
# Fluentd requirements
# YAML exists to trigger a configuration refresh when changes are made.
export FLUENTD_GCP_YAML_VERSION='v3.2.0'
FLUENTD_GCP_VERSION=${FLUENTD_GCP_VERSION:-1.6.17}
FLUENTD_GCP_MEMORY_LIMIT=${FLUENTD_GCP_MEMORY_LIMIT:-}
FLUENTD_GCP_CPU_REQUEST=${FLUENTD_GCP_CPU_REQUEST:-}
FLUENTD_GCP_MEMORY_REQUEST=${FLUENTD_GCP_MEMORY_REQUEST:-}
# Heapster requirements
HEAPSTER_GCP_BASE_MEMORY=${HEAPSTER_GCP_BASE_MEMORY:-140Mi}
HEAPSTER_GCP_MEMORY_PER_NODE=${HEAPSTER_GCP_MEMORY_PER_NODE:-4}
HEAPSTER_GCP_BASE_CPU=${HEAPSTER_GCP_BASE_CPU:-80m}
HEAPSTER_GCP_CPU_PER_NODE=${HEAPSTER_GCP_CPU_PER_NODE:-0.5}
# Optional: custom system banner for dashboard addon
CUSTOM_KUBE_DASHBOARD_BANNER=${CUSTOM_KUBE_DASHBOARD_BANNER:-}
# Default Stackdriver resources version exported by Fluentd-gcp addon
LOGGING_STACKDRIVER_RESOURCE_TYPES=${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_YAML_VERSION FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"
# Fluentd configuration for node-journal
ENABLE_NODE_JOURNAL=${ENABLE_NODE_JOURNAL:-false}
# prometheus-to-sd configuration
PROMETHEUS_TO_SD_ENDPOINT=${PROMETHEUS_TO_SD_ENDPOINT:-https://monitoring.googleapis.com/}
PROMETHEUS_TO_SD_PREFIX=${PROMETHEUS_TO_SD_PREFIX:-custom.googleapis.com}
ENABLE_PROMETHEUS_TO_SD=${ENABLE_PROMETHEUS_TO_SD:-true}
# TODO(#51292): Make kube-proxy Daemonset default and remove the configuration here.
# Optional: [Experiment Only] Run kube-proxy as a DaemonSet if set to true, run as static pods otherwise.
KUBE_PROXY_DAEMONSET=${KUBE_PROXY_DAEMONSET:-false} # true, false
# Control whether the startup scripts manage the lifecycle of kube-proxy
# When true, the startup scripts do not enable kube-proxy either as a daemonset addon or as a static pod
# regardless of the value of KUBE_PROXY_DAEMONSET.
# When false, the value of KUBE_PROXY_DAEMONSET controls whether kube-proxy comes up as a static pod or
# as an addon daemonset.
KUBE_PROXY_DISABLE="${KUBE_PROXY_DISABLE:-false}" # true, false
# Optional: Change the kube-proxy implementation. Choices are [iptables, ipvs].
KUBE_PROXY_MODE=${KUBE_PROXY_MODE:-iptables}
# Will be passed into the kube-proxy via `--detect-local-mode`
DETECT_LOCAL_MODE="${DETECT_LOCAL_MODE:-NodeCIDR}"
# Optional: duration of cluster signed certificates.
CLUSTER_SIGNING_DURATION=${CLUSTER_SIGNING_DURATION:-}
# Optional: enable certificate rotation of the kubelet certificates.
ROTATE_CERTIFICATES=${ROTATE_CERTIFICATES:-}
# The number of services that are allowed to sync concurrently. Will be passed
# into kube-controller-manager via `--concurrent-service-syncs`
CONCURRENT_SERVICE_SYNCS=${CONCURRENT_SERVICE_SYNCS:-}
# The value kubernetes.default.svc is only usable in Pods and should only be
# set for tests. DO NOT COPY THIS VALUE FOR PRODUCTION CLUSTERS.
export SERVICEACCOUNT_ISSUER='https://kubernetes.default.svc'
# Optional: Enable Node termination Handler for Preemptible and GPU VMs.
# https://github.com/GoogleCloudPlatform/k8s-node-termination-handler
ENABLE_NODE_TERMINATION_HANDLER=${ENABLE_NODE_TERMINATION_HANDLER:-false}
# Override default Node Termination Handler Image
if [[ "${NODE_TERMINATION_HANDLER_IMAGE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} NODE_TERMINATION_HANDLER_IMAGE"
fi
# Taint Windows nodes by default to prevent Linux workloads from being
# scheduled onto them.
WINDOWS_NODE_TAINTS=${WINDOWS_NODE_TAINTS:-node.kubernetes.io/os=win1809:NoSchedule}
# Whether to set up a private GCE cluster, i.e. a cluster where nodes have only private IPs.
export GCE_PRIVATE_CLUSTER=${KUBE_GCE_PRIVATE_CLUSTER:-false}
export GCE_PRIVATE_CLUSTER_PORTS_PER_VM=${KUBE_GCE_PRIVATE_CLUSTER_PORTS_PER_VM:-}
export ETCD_LISTEN_CLIENT_IP=0.0.0.0
export GCE_UPLOAD_KUBCONFIG_TO_MASTER_METADATA=true
# Optional: Enable Windows CSI-Proxy
export ENABLE_CSI_PROXY="${ENABLE_CSI_PROXY:-true}"
# KUBE_APISERVER_HEALTHCHECK_ON_HOST_IP decides whether
# kube-apiserver is healthchecked on host IP instead of 127.0.0.1.
export KUBE_APISERVER_HEALTHCHECK_ON_HOST_IP="${KUBE_APISERVER_HEALTHCHECK_ON_HOST_IP:-false}"
# ETCD_LISTEN_ON_HOST_IP decides whether etcd servers should also listen on host IP,
# in addition to listening to 127.0.0.1, and whether kube-apiserver should connect to etcd servers
# through host IP.
export ETCD_LISTEN_ON_HOST_IP="${ETCD_LISTEN_ON_HOST_IP:-false}"
# ETCD_PROGRESS_NOTIFY_INTERVAL defines the interval for etcd watch progress notify events.
export ETCD_PROGRESS_NOTIFY_INTERVAL="${ETCD_PROGRESS_NOTIFY_INTERVAL:-10m}"
# Use host IP instead of localhost in control plane kubeconfig files.
export KUBECONFIG_USE_HOST_IP="${KUBECONFIG_USE_HOST_IP:-false}"
|
ravisantoshgudimetla/kubernetes
|
cluster/gce/config-test.sh
|
Shell
|
apache-2.0
| 29,153 |
#!/bin/sh
if [ -z "$MYSQL_PORT" ]; then
MYSQL_PORT=3306
fi
echo "
create database if not exists domeos;
create database if not exists graph;
create database if not exists portal;
" > ./create.sql;
mysql -h ${MYSQL_HOST} -P ${MYSQL_PORT} -u ${MYSQL_USERNAME} -p${MYSQL_PASSWORD} < ./create.sql;
sleep 5
echo "use domeos;" > ./init.sql
cat ./create-db.sql >> ./init.sql
cat ./insert-data.sql >> ./init.sql
mysql -h ${MYSQL_HOST} -P ${MYSQL_PORT} -u ${MYSQL_USERNAME} -p${MYSQL_PASSWORD} < ./init.sql
sleep 5
mysql -h ${MYSQL_HOST} -P ${MYSQL_PORT} -u ${MYSQL_USERNAME} -p${MYSQL_PASSWORD} < ./graph-db-schema.sql
sleep 5
mysql -h ${MYSQL_HOST} -P ${MYSQL_PORT} -u ${MYSQL_USERNAME} -p${MYSQL_PASSWORD} < ./portal-db-schema.sql
|
domeos/server
|
src/main/resources/old-version.v0.2/mysql-initialize.sh
|
Shell
|
apache-2.0
| 737 |
#!/bin/sh
# ----------------------------------------------------------------------------
# Copyright 2001-2006 The Apache Software Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
#
# Copyright (c) 2001-2006 The Apache Software Foundation. All rights
# reserved.
# resolve links - $0 may be a softlink
PRG="$0"
while [ -h "$PRG" ]; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`/"$link"
fi
done
PRGDIR=`dirname "$PRG"`
BASEDIR=`cd "$PRGDIR/.." >/dev/null; pwd`
# Reset the REPO variable. If you need to influence this use the environment setup file.
REPO=
# OS specific support. $var _must_ be set to either true or false.
cygwin=false;
darwin=false;
case "`uname`" in
CYGWIN*) cygwin=true ;;
Darwin*) darwin=true
if [ -z "$JAVA_VERSION" ] ; then
JAVA_VERSION="CurrentJDK"
else
echo "Using Java version: $JAVA_VERSION"
fi
if [ -z "$JAVA_HOME" ]; then
if [ -x "/usr/libexec/java_home" ]; then
JAVA_HOME=`/usr/libexec/java_home`
else
JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/${JAVA_VERSION}/Home
fi
fi
;;
esac
if [ -z "$JAVA_HOME" ] ; then
if [ -r /etc/gentoo-release ] ; then
JAVA_HOME=`java-config --jre-home`
fi
fi
# For Cygwin, ensure paths are in UNIX format before anything is touched
if $cygwin ; then
[ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
[ -n "$CLASSPATH" ] && CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
fi
# If a specific java binary isn't specified search for the standard 'java' binary
if [ -z "$JAVACMD" ] ; then
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
else
JAVACMD=`which java`
fi
fi
if [ ! -x "$JAVACMD" ] ; then
echo "Error: JAVA_HOME is not defined correctly." 1>&2
echo " We cannot execute $JAVACMD" 1>&2
exit 1
fi
if [ -z "$REPO" ]
then
REPO="$BASEDIR"/lib
fi
CLASSPATH="$BASEDIR"/conf:"$REPO"/*
ENDORSED_DIR=
if [ -n "$ENDORSED_DIR" ] ; then
CLASSPATH=$BASEDIR/$ENDORSED_DIR/*:$CLASSPATH
fi
if [ -n "$CLASSPATH_PREFIX" ] ; then
CLASSPATH=$CLASSPATH_PREFIX:$CLASSPATH
fi
# For Cygwin, switch paths to Windows format before running java
if $cygwin; then
[ -n "$CLASSPATH" ] && CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
[ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
[ -n "$HOME" ] && HOME=`cygpath --path --windows "$HOME"`
[ -n "$BASEDIR" ] && BASEDIR=`cygpath --path --windows "$BASEDIR"`
[ -n "$REPO" ] && REPO=`cygpath --path --windows "$REPO"`
fi
CLASSPATH=$CLASSPATH:$3/wso2/lib/*
exec "$JAVACMD" $JAVA_OPTS -Djndi.bind.off="true" \
-classpath "$CLASSPATH" \
-Dapp.name="forgetme" \
-Dapp.pid="$$" \
-Dapp.repo="$REPO" \
-Dapp.home="$BASEDIR" \
-Dbasedir="$BASEDIR" \
org.wso2.carbon.privacy.forgetme.ForgetMeTool \
"$@"
|
milindaperera/product-ei
|
distribution/src/scripts/tools/forgetme/forgetme.sh
|
Shell
|
apache-2.0
| 3,750 |
#3> <#> a <http://purl.org/twc/vocab/conversion/CSV2RDF4LOD_environment_variables> ;
#3> rdfs:seeAlso
#3> <http://purl.org/twc/page/csv2rdf4lod/distributed_env_vars>,
#3> <https://github.com/timrdf/csv2rdf4lod-automation/wiki/Script:-source-me.sh> .
export CSV2RDF4LOD_CKAN="true"
export CSV2RDF4LOD_CKAN_SOURCE="http://hub.healthdata.gov"
export CSV2RDF4LOD_CKAN_WRITABLE="http://healthdata.tw.rpi.edu/hub"
source /srv/twc-healthdata/config/ckan/csv2rdf4lod-source-me-for-ckan-api-key.sh # for X_CKAN_API_Key
|
timrdf/snoozl
|
data/source/csv2rdf4lod-source-me-when-zzz.sh
|
Shell
|
apache-2.0
| 524 |
#!/bin/bash
loglevel="none"
echo ">>> REMOVING PREVIOUS TEST DATABASE"
node index.js --log-level $loglevel --delete-app test --config "./tests/config.test.json"
echo ">>> CREATING TEST DATABASE"
node index.js --log-level $loglevel --add-app test --config "./tests/config.test.json"
echo ">>> CREATING API KEY"
node index.js --log-level $loglevel --create-api-key test --using testkey --config "./tests/config.test.json"
echo ">>> RUNNING SERVER"
node index.js test-fruum-server-daemon --log-level $loglevel --config "./tests/config.test.json" &
sleep 3
pid=$(ps -ef | grep "test-fruum-server-daemon" | grep -v "grep" | awk '{print $2}')
echo "Server running on pid: $pid"
echo ">>> RUNNING TESTS"
./node_modules/.bin/jasmine-node tests --verbose --captureExceptions
echo ">>> KILLING SERVER"
kill -9 $pid
sleep 1
echo ">>> DELETING TEST DATABASE"
node index.js --log-level $loglevel --delete-app test --config "./tests/config.test.json"
echo ">>> DONE"
|
fruum/fruum
|
tests/run.sh
|
Shell
|
apache-2.0
| 954 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
  # use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/Zip/Zip.framework"
install_framework "$BUILT_PRODUCTS_DIR/iOSDFULibrary/iOSDFULibrary.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/Zip/Zip.framework"
install_framework "$BUILT_PRODUCTS_DIR/iOSDFULibrary/iOSDFULibrary.framework"
fi
|
developerXiong/CYBleManager
|
Pods/Target Support Files/Pods-CYBluetoothManager/Pods-CYBluetoothManager-frameworks.sh
|
Shell
|
mit
| 3,749 |
#!/bin/bash
FN="yeast.db0_3.8.2.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.9/data/annotation/src/contrib/yeast.db0_3.8.2.tar.gz"
"https://bioarchive.galaxyproject.org/yeast.db0_3.8.2.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-yeast.db0/bioconductor-yeast.db0_3.8.2_src_all.tar.gz"
)
MD5="550cd8cf56dd2a399588a06e8dfe6677"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
  if [[ $(uname -s) == "Linux" ]]; then
    if md5sum -c <<<"$MD5  $TARBALL"; then
      SUCCESS=1
      break
    fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
mdehollander/bioconda-recipes
|
recipes/bioconductor-yeast.db0/post-link.sh
|
Shell
|
mit
| 1,292 |
#!/bin/sh
set -e
set -x
cat <<EOF > /etc/installurl
https://fastly.cdn.openbsd.org/pub/OpenBSD
EOF
pkg_add sudo--
cat <<EOF > /etc/sudoers
#includedir /etc/sudoers.d
EOF
mkdir /etc/sudoers.d
cat <<EOF > /etc/sudoers.d/vagrant
Defaults:vagrant !requiretty
vagrant ALL=(ALL) NOPASSWD: ALL
root ALL=(ALL) NOPASSWD: ALL
EOF
chmod 440 /etc/sudoers.d/vagrant
cat <<EOF > /etc/boot.conf
set timeout 1
EOF
|
reallyenglish/packer-templates
|
http/openbsd-6.3/install-chroot.sh
|
Shell
|
mit
| 403 |
# Validation test for the urdf youbot model
rosrun xacro xacro.py `rospack find youbot_description`/robots/youbot_base.urdf.xacro -o ./youbot_base.urdf
rosrun urdf check_urdf ./youbot_base.urdf
rosrun xacro xacro.py `rospack find youbot_description`/robots/youbot_arm.urdf.xacro -o ./youbot_arm.urdf
rosrun urdf check_urdf ./youbot_arm.urdf
rosrun xacro xacro.py `rospack find youbot_description`/robots/youbot_gripper.urdf.xacro -o ./youbot_gripper.urdf
rosrun urdf check_urdf ./youbot_gripper.urdf
rosrun xacro xacro.py `rospack find youbot_description`/robots/youbot.urdf.xacro -o ./youbot.urdf
rosrun urdf check_urdf ./youbot.urdf
|
MatoMA/youbot_description
|
tests/test_urdf_model.sh
|
Shell
|
mit
| 639 |
# About environment variables
lesson_title "Variables"
test_setting_the_variable() {
local variable=1
assertEqual 1 __
}
test_using_double_quotes() {
local variable=2
assertEqual "foo $variable" __
}
test_unsetting_variables() {
local newVariable="Foooo"
unset newVariable
assertEqual $newVariable __
}
# Variables defined in global namespace are available everywhere
THIS_VARIABLE_IS_GLOBAL=42
test_global_variables() {
assertEqual $THIS_VARIABLE_IS_GLOBAL __
}
# In this function we define a global variable; it becomes available outside
function_with_a_global_variable() {
THIS_VARIABLE_IS_GLOBAL_FROM_A_FUNCTION=42
}
# Run the function
function_with_a_global_variable
test_global_variables_from_functions() {
assertEqual $THIS_VARIABLE_IS_GLOBAL_FROM_A_FUNCTION __
}
# In this function we define a local variable; it is not accessible outside
function_with_a_local_variable() {
local THIS_VARIABLE_IS_LOCAL=42
}
# Run the function
function_with_a_local_variable
test_local_variables() {
assertEqual $THIS_VARIABLE_IS_LOCAL __
}
test_variable_name_expansion_within_text() {
local var1=myvar
# __ = this_is_myvar_yay
assertEqual this_is_${var1}_yay __
}
test_only_exported_variables_are_accessible_by_another_process() {
local MY_EXPORTED_VARIABLE=43
assertEqual "$(support/variable_check)" __
MY_EXPORTED_VARIABLE=43
assertEqual "$(support/variable_check)" __
export MY_EXPORTED_VARIABLE=43
assertEqual "$(support/variable_check)" __
}
|
nadavc/bash_koans
|
src/00_about_variables.sh
|
Shell
|
mit
| 1,500 |
#!/bin/bash
#
# This test was used in a set of CMD3 tests (cmd3-4 test).
LUSTRE=${LUSTRE:-`dirname $0`/..}
. $LUSTRE/tests/test-framework.sh
init_test_env $@
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
assert_env CLIENTS MDSRATE SINGLECLIENT MPIRUN
MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
BASEDIR=$MOUNT/mdsrate
# Requirements
# set NUM_FILES=0 to force TIME_PERIOD to take effect
NUM_FILES=${NUM_FILES:-1000000}
TIME_PERIOD=${TIME_PERIOD:-600} # seconds
# Local test variables
TESTDIR_SINGLE="${BASEDIR}/single"
TESTDIR_MULTI="${BASEDIR}/multi"
LOG=${TESTSUITELOG:-$TMP/$(basename $0 .sh).log}
CLIENT=$SINGLECLIENT
NODES_TO_USE=${NODES_TO_USE:-$CLIENTS}
NUM_CLIENTS=$(get_node_count ${NODES_TO_USE//,/ })
[ ! -x ${MDSRATE} ] && error "${MDSRATE} not built."
log "===== $0 ====== "
check_and_setup_lustre
mkdir -p $BASEDIR
chmod 0777 $BASEDIR
$LFS setstripe $BASEDIR -c -1
get_stripe $BASEDIR
IFree=$(mdsrate_inodes_available)
if [ $IFree -lt $NUM_FILES ]; then
NUM_FILES=$IFree
fi
generate_machine_file $NODES_TO_USE $MACHINEFILE || error "can not generate machinefile"
# Make sure we start with a clean slate
rm -f ${LOG}
if [ -n "$NOSINGLE" ]; then
echo "NO Test for creates for a single client."
else
# We can use np = $NUM_CLIENTS to speed up the cleanup
mdsrate_cleanup $NUM_CLIENTS $MACHINEFILE $NUM_FILES $TESTDIR_SINGLE 'f%%d' --ignore
log "===== $0 ### 1 NODE CREATE ###"
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --create --time ${TIME_PERIOD}
--nfiles ${NUM_FILES} --dir ${TESTDIR_SINGLE} --filefmt 'f%%d'"
echo "+ ${COMMAND}"
mpi_run -np 1 -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
error "mdsrate creates for a single client failed, aborting"
fi
log "===== $0 ### 1 NODE UNLINK ###"
if [ -f "$LOG" ]; then
CREATED=$(sed -n '/^Rate:/s/^.* \([0-9]*\) creates .*/\1/p' $LOG)
[ $CREATED -gt 0 ] && NUM_FILES=$CREATED
fi
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --unlink
--nfiles ${NUM_FILES} --dir ${TESTDIR_SINGLE} --filefmt 'f%%d'"
echo "+ ${COMMAND}"
mpi_run -np 1 -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
error "mdsrate unlink on a single client failed, aborting"
fi
rmdir $TESTDIR_SINGLE
fi
IFree=$(mdsrate_inodes_available)
if [ $IFree -lt $NUM_FILES ]; then
NUM_FILES=$IFree
fi
[ $NUM_CLIENTS -eq 1 ] && NOMULTI=yes
if [ -n "$NOMULTI" ]; then
echo "NO test for create on multiple nodes."
else
mdsrate_cleanup $NUM_CLIENTS $MACHINEFILE $NUM_FILES $TESTDIR_MULTI 'f%%d' --ignore
log "===== $0 ### $NUM_CLIENTS NODES CREATE ###"
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --create --time ${TIME_PERIOD}
--nfiles $NUM_FILES --dir ${TESTDIR_MULTI} --filefmt 'f%%d'"
echo "+ ${COMMAND}"
mpi_run -np ${NUM_CLIENTS} -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
error "mdsrate create on multiple nodes failed, aborting"
fi
log "===== $0 ### $NUM_CLIENTS NODES UNLINK ###"
if [ -f "$LOG" ]; then
CREATED=$(sed -n '/^Rate:/s/^.* \([0-9]*\) creates .*/\1/p' $LOG)
[ $CREATED -gt 0 ] && NUM_FILES=$CREATED
fi
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --unlink
--nfiles ${NUM_FILES} --dir ${TESTDIR_MULTI} --filefmt 'f%%d'"
echo "+ ${COMMAND}"
mpi_run -np ${NUM_CLIENTS} -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
error "mdsrate unlink on multiple nodes failed, aborting"
fi
rmdir $TESTDIR_MULTI
fi
rmdir $BASEDIR || true
rm -f $MACHINEFILE
check_and_cleanup_lustre
#rm -f $LOG
exit 0
|
behlendorf/lustre
|
lustre/tests/mdsrate-create-large.sh
|
Shell
|
gpl-2.0
| 3,928 |
#!/bin/bash
set -e
#Setup common variables
export ARCH=arm
export CROSS_COMPILE=arm-linux-gnueabi-
export AS=${CROSS_COMPILE}as
export LD=${CROSS_COMPILE}ld
export CC=${CROSS_COMPILE}gcc
export AR=${CROSS_COMPILE}ar
export NM=${CROSS_COMPILE}nm
export STRIP=${CROSS_COMPILE}strip
export OBJCOPY=${CROSS_COMPILE}objcopy
export OBJDUMP=${CROSS_COMPILE}objdump
export LOCALVERSION=""
export MKBOOTIMG=${LICHEE_TOOLS_DIR}/pack/pctools/linux/android/mkbootimg
KERNEL_VERSION=`make -s kernelversion -C ./`
LICHEE_KDIR=`pwd`
LICHEE_MOD_DIR=${LICHEE_KDIR}/output/lib/modules/${KERNEL_VERSION}
export LICHEE_KDIR
update_kern_ver()
{
if [ -r include/generated/utsrelease.h ]; then
KERNEL_VERSION=`cat include/generated/utsrelease.h |awk -F\" '{print $2}'`
fi
LICHEE_MOD_DIR=${LICHEE_KDIR}/output/lib/modules/${KERNEL_VERSION}
}
show_help()
{
printf "
Build script for Lichee platform
Valid options:
help - show this help
kernel - build kernel
modules - build kernel module in modules dir
clean - clean kernel and modules
"
}
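# Illustrative invocations (option names from the help text above; the script
# location may differ in your tree):
#   ./scripts/build_crane-standard.sh kernel    # build the kernel only
#   ./scripts/build_crane-standard.sh modules   # build the out-of-tree modules
#   ./scripts/build_crane-standard.sh clean     # clean kernel and modules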
NAND_ROOT=${LICHEE_KDIR}/modules/nand
build_nand_lib()
{
echo "build nand library ${NAND_ROOT}/${LICHEE_CHIP}/lib"
if [ -d ${NAND_ROOT}/${LICHEE_CHIP}/lib ]; then
echo "build nand library now"
make -C modules/nand/${LICHEE_CHIP}/lib clean 2> /dev/null
make -C modules/nand/${LICHEE_CHIP}/lib lib install
else
echo "build nand with existing library"
fi
}
build_gpu_sun8i()
{
export LANG=en_US.UTF-8
unset LANGUAGE
make -C modules/mali LICHEE_MOD_DIR=${LICHEE_MOD_DIR} LICHEE_KDIR=${LICHEE_KDIR} \
install
}
build_gpu_sun8iw6()
{
if [ "x${LICHEE_PLATFORM}" = "xandroid" ] ; then
unset OUT
unset TOP
make -j16 -C modules/eurasia_km/eurasiacon/build/linux2/sunxi_android \
LICHEE_MOD_DIR=${LICHEE_MOD_DIR} LICHEE_KDIR=${LICHEE_KDIR}
for file in $(find modules/eurasia_km -name "*.ko"); do
cp $file ${LICHEE_MOD_DIR}
done
fi
}
build_gpu_sun9iw1()
{
if [ "x${LICHEE_PLATFORM}" = "xandroid" ] ; then
unset OUT
unset TOP
make -j16 -C modules/rogue_km/build/linux/sunxi_android \
LICHEE_MOD_DIR=${LICHEE_MOD_DIR} LICHEE_KDIR=${LICHEE_KDIR} \
RGX_BVNC=1.75.2.30 \
KERNELDIR=${LICHEE_KDIR}
for file in $(find modules/rogue_km -name "*.ko"); do
cp $file ${LICHEE_MOD_DIR}
done
fi
}
build_gpu()
{
chip_sw=`echo ${LICHEE_CHIP} | awk '{print substr($0,1,length($0)-2)}'`
echo build gpu module for ${chip_sw} ${LICHEE_PLATFORM}
if [ "${chip_sw}" = "sun9iw1" ]; then
build_gpu_sun9iw1
elif
[ "${chip_sw}" = "sun8iw3" ] ||
[ "${chip_sw}" = "sun8iw5" ] ||
[ "${chip_sw}" = "sun8iw7" ] ||
[ "${chip_sw}" = "sun8iw9" ]; then
build_gpu_sun8i
elif [ "${chip_sw}" = "sun8iw6" ] ; then
build_gpu_sun8iw6
fi
}
clean_gpu_sun9iw1()
{
unset OUT
unset TOP
make -C modules/rogue_km/build/linux/sunxi_android \
LICHEE_MOD_DIR=${LICHEE_MOD_DIR} LICHEE_KDIR=${LICHEE_KDIR} \
RGX_BVNC=1.75.2.30 \
KERNELDIR=${LICHEE_KDIR} clean
}
clean_gpu_sun8iw6()
{
unset OUT
unset TOP
make -C modules/eurasia_km/eurasiacon/build/linux2/sunxi_android \
LICHEE_MOD_DIR=${LICHEE_MOD_DIR} LICHEE_KDIR=${LICHEE_KDIR} clobber
}
clean_gpu()
{
chip_sw=`echo $LICHEE_CHIP | awk '{print substr($0,1,length($0)-2)}'`
echo
echo clean gpu module ${chip_sw} $LICHEE_PLATFORM
if [ "${chip_sw}" = "sun9iw1" ]; then
clean_gpu_sun9iw1
elif [ "${chip_sw}" = "sun8iw6" ]; then
clean_gpu_sun8iw6
fi
}
build_kernel()
{
echo "Building kernel"
cd ${LICHEE_KDIR}
rm -rf output/
echo "${LICHEE_MOD_DIR}"
mkdir -p ${LICHEE_MOD_DIR}
# echo "build_kernel LICHEE_KERN_DEFCONF" ${LICHEE_KERN_DEFCONF}
# We need to copy rootfs files to compile kernel for linux image
# cp -f rootfs.cpio.gz output/
if [ ! -f .config ] ; then
# printf "\n\033[0;31;1mUsing default config ${LICHEE_KERN_DEFCONF} ...\033[0m\n\n"
printf "\n\033[0;31;1mUsing default config sun8iw8p1smp_defconfig ...\033[0m\n\n"
# cp arch/arm/configs/${LICHEE_KERN_DEFCONF} .config
cp arch/arm/configs/sun8iw8p1smp_defconfig .config
fi
make ARCH=arm CROSS_COMPILE=${CROSS_COMPILE} -j${LICHEE_JLEVEL} uImage modules
update_kern_ver
#The Image is origin binary from vmlinux.
cp -vf arch/arm/boot/Image output/bImage
cp -vf arch/arm/boot/[zu]Image output/
cp .config output/
tar -jcf output/vmlinux.tar.bz2 vmlinux
if [ ! -f ./drivers/arisc/binary/arisc ]; then
echo "arisc" > ./drivers/arisc/binary/arisc
fi
cp ./drivers/arisc/binary/arisc output/
for file in $(find drivers sound crypto block fs security net -name "*.ko"); do
cp $file ${LICHEE_MOD_DIR}
done
cp -f Module.symvers ${LICHEE_MOD_DIR}
}
build_modules()
{
echo "Building modules"
if [ ! -f include/generated/utsrelease.h ]; then
printf "Please build kernel first!\n"
exit 1
fi
update_kern_ver
# build_nand_lib
# make -C modules/nand LICHEE_MOD_DIR=${LICHEE_MOD_DIR} LICHEE_KDIR=${LICHEE_KDIR} \
# CONFIG_CHIP_ID=${CONFIG_CHIP_ID} install
# build_gpu
}
regen_rootfs_cpio()
{
echo "regenerate rootfs cpio"
cd ${LICHEE_KDIR}/output
if [ -x "../scripts/build_rootfs.sh" ]; then
../scripts/build_rootfs.sh e ./rootfs.cpio.gz > /dev/null
else
echo "No such file: scripts/build_rootfs.sh"
exit 1
fi
mkdir -p ./skel/lib/modules/${KERNEL_VERSION}
if [ -e ${LICHEE_MOD_DIR}/nand.ko ]; then
cp ${LICHEE_MOD_DIR}/nand.ko ./skel/lib/modules/${KERNEL_VERSION}
if [ $? -ne 0 ]; then
echo "copy nand module error: $?"
exit 1
fi
fi
ko_file=`find ./skel/lib/modules/$KERNEL_VERSION/ -name "*.ko"`
if [ ! -z "$ko_file" ]; then
${STRIP} -d ./skel/lib/modules/$KERNEL_VERSION/*.ko
fi
rm -f rootfs.cpio.gz
../scripts/build_rootfs.sh c rootfs.cpio.gz > /dev/null
rm -rf skel
cd - > /dev/null
}
build_ramfs()
{
local bss_sz=0;
local CHIP="";
local BIMAGE="output/bImage";
local RAMDISK="output/rootfs.cpio.gz";
local BASE="";
local OFFSET="";
# update rootfs.cpio.gz with new module files
regen_rootfs_cpio
CHIP=`echo ${LICHEE_CHIP} | sed -e 's/.*\(sun[0-9x]i\).*/\1/g'`;
if [ "${CHIP}" = "sun9i" ]; then
BASE="0x20000000";
else
BASE="0x40000000";
fi
if [ -f vmlinux ]; then
bss_sz=`${CROSS_COMPILE}readelf -S vmlinux | \
awk '/\.bss/ {print strtonum("0x"$5)+strtonum("0x"$6)}'`;
fi
#bss_sz=`du -sb ${BIMAGE} | awk '{printf("%u", $1)}'`;
#
# If the computed kernel size is larger than 16M (16#1000000 = 0x1000000), offset the ramdisk by 0x02000000
#
if [ ${bss_sz} -gt $((16#1000000)) ]; then
OFFSET="0x02000000";
else
OFFSET="0x01000000";
fi
${MKBOOTIMG} --kernel ${BIMAGE} \
--ramdisk ${RAMDISK} \
--board ${CHIP} \
--base ${BASE} \
--ramdisk_offset ${OFFSET} \
-o output/boot.img
# If U-Boot uses *bootm* to boot the kernel, we should use uImage.
echo build_ramfs
echo "Copy boot.img to output directory ..."
cp output/boot.img ${LICHEE_PLAT_OUT}
cp output/vmlinux.tar.bz2 ${LICHEE_PLAT_OUT}
if [ ! -f output/arisc ]; then
echo "arisc" > output/arisc
fi
cp output/arisc ${LICHEE_PLAT_OUT}
}
gen_output()
{
if [ "x${LICHEE_PLATFORM}" = "xandroid" ] ; then
echo "Copy modules to target ..."
rm -rf ${LICHEE_PLAT_OUT}/lib
cp -rf ${LICHEE_KDIR}/output/* ${LICHEE_PLAT_OUT}
return
fi
if [ -d ${LICHEE_BR_OUT}/target ] ; then
echo "Copy modules to target ..."
local module_dir="${LICHEE_BR_OUT}/target/lib/modules"
rm -rf ${module_dir}
mkdir -p ${module_dir}
cp -rf ${LICHEE_MOD_DIR} ${module_dir}
fi
}
clean_kernel()
{
echo "Cleaning kernel"
make ARCH=arm clean
rm -rf output/*
(
export LANG=en_US.UTF-8
unset LANGUAGE
make -C modules/mali LICHEE_MOD_DIR=${LICHEE_MOD_DIR} LICHEE_KDIR=${LICHEE_KDIR} clean
)
}
clean_modules()
{
echo "Cleaning modules"
clean_gpu
}
#####################################################################
#
# Main Runtine
#
#####################################################################
#LICHEE_ROOT=`(cd ${LICHEE_KDIR}/..; pwd)`
#export PATH=${LICHEE_ROOT}/buildroot/output/external-toolchain/bin:${LICHEE_ROOT}/tools/pack/pctools/linux/android:$PATH
#if [ x$2 = x ];then
# echo Error! you show pass chip name as param 2
# exit 1
#else
# chip_name=$2
# platform_name=${chip_name:0:5}
#fi
LICHEE_ROOT=`(cd ${LICHEE_KDIR}/..; pwd)`
export PATH=${LICHEE_ROOT}/out/sun8iw8p1/linux/common/buildroot/external-toolchain/bin:${LICHEE_ROOT}/tools/pack/pctools/linux/android:$PATH
case "$1" in
kernel)
build_kernel
;;
modules)
build_modules
;;
clean)
clean_kernel
clean_modules
;;
*)
build_kernel
build_modules
# build_ramfs
# gen_output
echo -e "\n\033[0;31;1m${LICHEE_CHIP} compile Kernel successful\033[0m\n\n"
;;
esac
|
cubieboard/Cubieboard5-kernel-source
|
scripts/build_crane-standard.sh
|
Shell
|
gpl-2.0
| 8,730 |
#!/bin/bash
# Build the OMERO base VirtualBox image using Veewee.
# Example: ./build_base_image.sh Debian-7.7.0-amd64-omerobase
set -e -x
if [ $# -ne 1 ]; then
echo Usage: `basename $0` base_definition_name
exit 1
fi
if [ -d "$HOME/VirtualBox VMs/" ]; then
VBOXVMS="$HOME/VirtualBox VMs/"
else
echo "Cannot find VirtualBox VMs directory"
exit 2
fi
BASEBOX=$1
BUILD_NUMBER=${BUILD_NUMBER:-DEV}
KEEP_VM=0
# /usr/sbin may not be in PATH, so specify full path to lsof
LSOF=${LSOF:-/usr/sbin/lsof}
# Veewee/VirtualBox sometimes exits before releasing the underlying files
# which can cause locking issues or file corruption. This is an attempt to
# work around it.
wait_for_vbox()
{
if [ ! -f "$1" ]; then
echo "ERROR: Invalid file"
exit 2
fi
if [ ! -x "$LSOF" ]; then
echo "ERROR: Unable to find lsof"
exit 2
fi
RET=0
set +e
echo -n "Waiting for VBox to release file "
while [ $RET -eq 0 ]; do
echo -n .
sleep 5
"$LSOF" -Fc "$1" |grep VBoxHeadless
RET=$?
done
echo
set -e
}
# Setup the Ruby environment
. ~/.rvm/scripts/rvm
rvm use 1.9.3
# Install Ruby Gem dependencies if not already present
bundle install
# Build the box
bundle exec veewee vbox build --force "$BASEBOX" --nogui
bundle exec veewee vbox halt "$BASEBOX"
# At this point there should be a new VirtualBox machine in the VirtualBox
# directory, for example under
# `~/VirtualBox VMs/Debian-7.1.0-amd64-omerobase/`.
#
# If you want to keep the base VM then clone the VDI to another directory,
# do not just copy the VDI since it contains a UUID registered to the base
# image VM. Note the VDI will remain registered to VirtualBox as a hard disk.
#
# Alternatively copy the VDI and delete the original VM
# VBox may append a number to the disk image name
SOURCE=$(ls "$VBOXVMS/${BASEBOX}/${BASEBOX}"*.vdi)
DEST="$PWD/${BASEBOX}-b${BUILD_NUMBER}.vdi"
wait_for_vbox "$SOURCE"
if [ $KEEP_VM -eq 0 ]; then
cp "$SOURCE" "$DEST"
bundle exec veewee vbox destroy "$BASEBOX"
else
VBoxManage clonehd "$SOURCE" "$DEST"
fi
echo "Base image: $DEST"
|
manics/ome-veewee
|
build_base_image.sh
|
Shell
|
gpl-2.0
| 2,128 |
#! /bin/sh
# Copyright (C) 2010-2014 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Test to make sure AC_CONFIG_AUX_DIR works correctly.
# This test calls AC_CONFIG_AUX_DIR with an explicit literal argument,
# thus explicitly making the directory named by that argument the
# config auxdir.
# Keep this in sync with sister tests 'auxdir7.sh' and 'auxdir8.sh'.
. test-init.sh
cat > configure.ac <<END
AC_INIT([$me], [1.0])
AC_CONFIG_AUX_DIR([auxdir])
AM_INIT_AUTOMAKE
AC_CONFIG_FILES([Makefile subdir/Makefile])
END
mkdir subdir auxdir
cat > Makefile.am << 'END'
pkgdata_DATA =
END
cp Makefile.am subdir/Makefile.am
: > auxdir/mkinstalldirs
: > auxdir/install-sh
: > auxdir/missing
$ACLOCAL
$AUTOMAKE
$FGREP '$(top_srcdir)/auxdir/mkinstalldirs' Makefile.in
$FGREP '$(top_srcdir)/auxdir/mkinstalldirs' subdir/Makefile.in
:
|
kuym/openocd
|
tools/automake-1.15/t/auxdir6.sh
|
Shell
|
gpl-2.0
| 1,438 |
#!/bin/bash
# This script rebuilds customized layer of Dojo for tt-rss
# Place unpacked Dojo source release in this directory and run this script.
# It will automatically replace previous build of Dojo in ../dojo
# Dojo requires Java runtime to build. Further information on rebuilding Dojo
# is available here: http://dojotoolkit.org/reference-guide/build/index.html
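# Illustrative usage, assuming this script is run from lib/dojo-src (it will
# download the Dojo source itself if no "dojo" directory is present):
#   ./rebuild-dojo.sh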
VERSION=1.12.1
# Download and extract dojo src code if it doesn't already exist
if [ ! -d "dojo" ]; then
TARBALL=dojo-release-$VERSION-src.tar.gz
if [ ! -f $TARBALL ]; then
wget -q http://download.dojotoolkit.org/release-$VERSION/$TARBALL
fi
tar -zxf $TARBALL
mv dojo-release-$VERSION-src/* .
rm -rf dojo-release-$VERSION-src
fi
if [ -d util/buildscripts/ ]; then
rm -rf release/dojo
pushd util/buildscripts
./build.sh profile=../../tt-rss action=release optimize=shrinksafe cssOptimize=comments
popd
if [ -d release/dojo ]; then
rm -rf ../dojo ../dijit
cp -r release/dojo/dojo ..
cp -r release/dojo/dijit ..
cd ..
find dojo -name '*uncompressed*' -exec rm -- {} \;
find dijit -name '*uncompressed*' -exec rm -- {} \;
else
echo $0: ERROR: Dojo build seems to have failed.
fi
else
echo $0: ERROR: Please unpack Dojo source release into current directory.
fi
|
maerco/tinytinyrss
|
lib/dojo-src/rebuild-dojo.sh
|
Shell
|
gpl-3.0
| 1,291 |
#! /bin/sh
adb logcat | ndk-stack -sym ../obj/local/armeabi-v7a > crashdump.log
|
fzurita/mupen64plus-ae
|
tools/ndk-stack.sh
|
Shell
|
gpl-3.0
| 81 |
#!/bin/bash
# This attempts to stop all your emulators (on Ubuntu 18.10 at least...) nicely
#
# ...then if they don't stop it kills them
for EMU_ID in `adb devices -l | grep emulator | cut -d' ' -f1`; do
echo Stopping emulator $EMU_ID...
adb -s $EMU_ID emu kill
done
sleep 10
for PID in `ps -eo pid,cmd,args |grep emulator|grep Android|grep -v bash|grep -v crash|grep -v grep|cut -d/ -f1`; do
echo "Stopping emulator with $PID..."
kill $PID
done
|
ankidroid/Anki-Android
|
tools/quality-check/stop_all_emulators.sh
|
Shell
|
gpl-3.0
| 457 |
#! /bin/sh
$XGETTEXT text_vcard.cpp rc.cpp -o $podir/kmail_text_vcard_plugin.pot
$XGETTEXT text_calendar.cpp attendeeselector.cpp delegateselector.cpp rc.cpp -o $podir/kmail_text_calendar_plugin.pot
$XGETTEXT text_xdiff.cpp rc.cpp -o $podir/kmail_text_xdiff_plugin.pot
|
lefou/kdepim-noakonadi
|
plugins/kmail/bodypartformatter/Messages.sh
|
Shell
|
lgpl-2.1
| 269 |
#! /usr/bin/env bash
#
# Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0 which
# accompanies this distribution and is available at
# http://www.opensource.org/licenses/apache2.0.php.
#
# This script will attempt to install the client dependencies for hbase
# into a given directory. Usually this is used to install the deps into either the
# geomesa tools lib dir or the WEB-INF/lib dir of geoserver.
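# Illustrative usage (the target path below is hypothetical; with no argument
# the jars are installed into ${%%gmtools.dist.name%%_HOME}/lib):
#   bin/install-hadoop-hbase.sh /path/to/geoserver/WEB-INF/lib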
hbase_version="%%hbase.version%%"
hadoop_version="%%hadoop.version.recommended%%"
# Load common functions and setup
if [ -z "${%%gmtools.dist.name%%_HOME}" ]; then
export %%gmtools.dist.name%%_HOME="$(cd "`dirname "$0"`"/..; pwd)"
fi
. $%%gmtools.dist.name%%_HOME/bin/common-functions.sh
install_dir="${1:-${%%gmtools.dist.name%%_HOME}/lib}"
# Resource download location
base_url="${GEOMESA_MAVEN_URL:-https://search.maven.org/remotecontent?filepath=}"
declare -a urls=(
"${base_url}org/apache/hbase/hbase-annotations/${hbase_version}/hbase-annotations-${hbase_version}.jar"
"${base_url}org/apache/hbase/hbase-client/${hbase_version}/hbase-client-${hbase_version}.jar"
"${base_url}org/apache/hbase/hbase-common/${hbase_version}/hbase-common-${hbase_version}.jar"
"${base_url}org/apache/hbase/hbase-hadoop2-compat/${hbase_version}/hbase-hadoop2-compat-${hbase_version}.jar"
"${base_url}org/apache/hbase/hbase-hadoop-compat/${hbase_version}/hbase-hadoop-compat-${hbase_version}.jar"
"${base_url}org/apache/hbase/hbase-prefix-tree/${hbase_version}/hbase-prefix-tree-${hbase_version}.jar"
"${base_url}org/apache/hbase/hbase-procedure/${hbase_version}/hbase-procedure-${hbase_version}.jar"
"${base_url}org/apache/hbase/hbase-protocol/${hbase_version}/hbase-protocol-${hbase_version}.jar"
"${base_url}org/apache/hbase/hbase-server/${hbase_version}/hbase-server-${hbase_version}.jar"
"${base_url}commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar"
"${base_url}org/apache/hadoop/hadoop-annotations/${hadoop_version}/hadoop-annotations-${hadoop_version}.jar"
"${base_url}org/apache/hadoop/hadoop-auth/${hadoop_version}/hadoop-auth-${hadoop_version}.jar"
"${base_url}org/apache/hadoop/hadoop-client/${hadoop_version}/hadoop-client-${hadoop_version}.jar"
"${base_url}org/apache/hadoop/hadoop-common/${hadoop_version}/hadoop-common-${hadoop_version}.jar"
"${base_url}org/apache/hadoop/hadoop-hdfs/${hadoop_version}/hadoop-hdfs-${hadoop_version}.jar"
)
downloadUrls "$install_dir" urls[@]
|
jahhulbert-ccri/geomesa
|
geomesa-bigtable/geomesa-bigtable-tools/bin/install-hadoop-hbase.sh
|
Shell
|
apache-2.0
| 2,595 |
#!/bin/bash
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eu
# Main deploy functions for the continuous build system
# Just source this file and use the various method:
# bazel_build build bazel and run all its test
# bazel_release use the artifact generated by bazel_build and push
# them to github for a release and to GCS for a release candidate.
# Also prepare an email for announcing the release.
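# Illustrative usage, assuming a release branch checkout and an artifact
# directory produced by a prior build (the path below is hypothetical):
#   source scripts/ci/build.sh
#   deploy_release /tmp/bazel-release-artifacts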
# Load common.sh
BUILD_SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$(dirname ${BUILD_SCRIPT_DIR})/release/common.sh"
source "$(dirname ${BUILD_SCRIPT_DIR})/release/relnotes.sh"
if ! command -v gsutil &>/dev/null; then
echo "Required tool 'gsutil' not found. Please install it:"
echo "See https://cloud.google.com/sdk/downloads for instructions."
exit 1
fi
if ! command -v github-release &>/dev/null; then
echo "Required tool 'github-release' not found. Download it from here:"
echo "https://github.com/c4milo/github-release/releases"
echo "Just extract the archive and put the binary on your PATH."
exit 1
fi
if ! command -v debsign &>/dev/null; then
echo "Required tool 'debsign' not found. Please install it via apt-get:"
echo "apt-get install devscripts"
exit 1
fi
if ! command -v reprepro &>/dev/null; then
echo "Required tool 'reprepro' not found. Please install it via apt-get:"
echo "apt-get install reprepro"
exit 1
fi
if ! command -v gpg &>/dev/null; then
echo "Required tool 'gpg' not found. Please install it via apt-get:"
echo "apt-get install gnupg"
exit 1
fi
if ! command -v pandoc &>/dev/null; then
echo "Required tool 'pandoc' not found. Please install it via apt-get:"
echo "apt-get install pandoc"
exit 1
fi
# if ! command -v ssmtp &>/dev/null; then
# echo "Required tool 'ssmtp' not found. Please install it via apt-get:"
# echo "apt-get install ssmtp"
# exit 1
# fi
export APT_GPG_KEY_ID=$(gsutil cat gs://bazel-trusted-encrypted-secrets/release-key.gpg.id)
# Generate a string from a template and a list of substitutions.
# The first parameter is the template string; each subsequent pair of
# parameters is taken as a couple: the first is the string to substitute
# and the second is the result of the substitution.
function generate_from_template() {
local value="$1"
shift
while (( $# >= 2 )); do
value="${value//$1/$2}"
shift 2
done
echo "${value}"
}
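# Illustrative call (hypothetical values): the line below would print
# "Bazel 0.5.0 is out", substituting each placeholder pair in turn.
#   generate_from_template "%product% %version% is out" "%product%" "Bazel" "%version%" "0.5.0"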
# Generate the email for the release.
# The first line of the output will be the recipient, the second line
# the mail subject and the subsequent lines the mail content.
# If no release is planned, this function's output will be empty.
function generate_email() {
RELEASE_CANDIDATE_URL="https://releases.bazel.build/%release_name%/rc%rc%/index.html"
RELEASE_URL="https://github.com/bazelbuild/bazel/releases/tag/%release_name%"
if [ "$(is_rolling_release)" -eq 1 ]; then
echo "No emails for rolling releases"
return 0
fi
local release_name=$(get_release_name)
local rc=$(get_release_candidate)
local args=(
"%release_name%" "${release_name}"
"%rc%" "${rc}"
"%relnotes%" "# $(get_full_release_notes)"
)
if [ -n "${rc}" ]; then
args+=(
"%url%" "$(generate_from_template "${RELEASE_CANDIDATE_URL}" "${args[@]}")"
)
generate_from_template \
"$(cat "${BUILD_SCRIPT_DIR}/rc_email.txt")" \
"${args[@]}"
elif [ -n "${release_name}" ]; then
args+=(
"%url%" "$(generate_from_template "${RELEASE_URL}" "${args[@]}")"
)
generate_from_template \
"$(cat "${BUILD_SCRIPT_DIR}/release_email.txt")" "${args[@]}"
fi
}
function get_release_page() {
echo "# $(get_full_release_notes)"'
_Notice_: Bazel installers contain binaries licensed under the GPLv2 with
Classpath exception. Those installers should always be redistributed along with
the source code.
Some versions of Bazel contain a bundled version of OpenJDK. The license of the
bundled OpenJDK and other open-source components can be displayed by running
the command `bazel license`. The vendor and version information of the bundled
OpenJDK can be displayed by running the command `bazel info java-runtime`.
The binaries and source-code of the bundled OpenJDK can be
[downloaded from our mirror server](https://mirror.bazel.build/openjdk/index.html).
_Security_: All our binaries are signed with our
[public key](https://bazel.build/bazel-release.pub.gpg) 3D5919B448457EE0.
'
}
# Deploy a github release using a third party tool:
# https://github.com/c4milo/github-release
# This method expects the following arguments:
#   $1..$n files generated by package_build (should not contain the README file)
# Please set GITHUB_TOKEN to talk to the Github API.
function release_to_github() {
local artifact_dir="$1"
local release_name=$(get_release_name)
local rc=$(get_release_candidate)
if [ -n "${release_name}" ] && [ -z "${rc}" ]; then
local github_token="$(gsutil cat gs://bazel-trusted-encrypted-secrets/github-trusted-token.enc | \
gcloud kms decrypt --project bazel-public --location global --keyring buildkite --key github-trusted-token --ciphertext-file - --plaintext-file -)"
if [ "$(is_rolling_release)" -eq 1 ]; then
GITHUB_TOKEN="${github_token}" github-release -prerelease "bazelbuild/bazel" "${release_name}" "" "$(get_release_page)" "${artifact_dir}/*"
else
GITHUB_TOKEN="${github_token}" github-release "bazelbuild/bazel" "${release_name}" "" "$(get_release_page)" "${artifact_dir}/*"
fi
fi
}
# Creates an index of the files contained in folder $1 in Markdown format.
function create_index_md() {
# First, add the release notes
get_release_page
# Then, add the list of files
echo
echo "## Index of files"
echo
for f in $1/*.sha256; do # just list the sha256 ones
local filename=$(basename $f .sha256);
echo " - [${filename}](${filename}) [[SHA-256](${filename}.sha256)] [[SIG](${filename}.sig)]"
done
}
# Creates an index of the files contained in folder $1 in HTML format.
function create_index_html() {
create_index_md "${@}" | pandoc -f markdown -t html
}
# Deploy a release candidate to Google Cloud Storage.
# It requires gsutil to be installed. You can force the path to gsutil
# by setting the GSUTIL environment variable.
# This method expects the following arguments:
# $1..$n files generated by package_build
function release_to_gcs() {
local artifact_dir="$1"
local release_name="$(get_release_name)"
local rc="$(get_release_candidate)"
if [ -n "${release_name}" ]; then
local release_path="${release_name}/release"
if [ "$(is_rolling_release)" -eq 1 ]; then
# Store rolling releases and their RCs in the same directory (for simplicity)
release_path="$(get_lts_name)/rolling/$(get_full_release_name)"
elif [ -n "${rc}" ]; then
release_path="${release_name}/rc${rc}"
fi
create_index_html "${artifact_dir}" > "${artifact_dir}/index.html"
gsutil -m cp "${artifact_dir}/**" "gs://bazel/${release_path}"
fi
}
function ensure_gpg_secret_key_imported() {
if ! gpg --list-secret-keys | grep "${APT_GPG_KEY_ID}" > /dev/null; then
keyfile=$(mktemp --tmpdir)
chmod 0600 "${keyfile}"
gsutil cat "gs://bazel-trusted-encrypted-secrets/release-key.gpg.enc" | \
gcloud kms decrypt --location "global" --keyring "buildkite" --key "bazel-release-key" --ciphertext-file "-" --plaintext-file "${keyfile}"
gpg --allow-secret-key-import --import "${keyfile}"
rm -f "${keyfile}"
fi
  # Make sure we use a stronger digest algorithm.
  # We use reprepro to generate the debian repository,
  # but there's no way to pass flags to gpg through reprepro, so write it into
  # ~/.gnupg/gpg.conf
if ! grep "digest-algo sha256" ~/.gnupg/gpg.conf > /dev/null; then
echo "digest-algo sha256" >> ~/.gnupg/gpg.conf
fi
}
# Generate new content of Release file
function print_new_release_content() {
local distribution="$1"
# Print the headers of the original Release file
cat <<EOF
Origin: Bazel Authors
Label: Bazel
Codename: $1
Date: $(date -u "+%a, %d %b %Y %H:%M:%S UTC")
Architectures: amd64
Components: jdk1.8
Description: Bazel APT Repository
EOF
metadata_files=("jdk1.8/binary-amd64/Packages" "jdk1.8/binary-amd64/Packages.gz" "jdk1.8/binary-amd64/Release" "jdk1.8/source/Sources.gz" "jdk1.8/source/Release")
  # Re-generate hashes for all metadata files
echo MD5Sum:
for file in ${metadata_files[*]}; do
path="dists/${distribution}/$file"
echo "" "$(md5sum ${path} | cut -d " " -f1)" "$(ls -l ${path} | cut -d " " -f5)" "$file"
done
echo SHA1:
for file in ${metadata_files[*]}; do
path="dists/${distribution}/$file"
echo "" "$(sha1sum ${path} | cut -d " " -f1)" "$(ls -l ${path} | cut -d " " -f5)" "$file"
done
echo SHA256:
for file in ${metadata_files[*]}; do
path="dists/${distribution}/$file"
echo "" "$(sha256sum ${path} | cut -d " " -f1)" "$(ls -l ${path} | cut -d " " -f5)" "$file"
done
}
# Merge metadata with previous distribution
function merge_previous_dists() {
local distribution="$1"
# Download the metadata info from previous distribution
mkdir -p previous
gsutil -m cp -r "gs://bazel-apt/dists" "./previous"
# Merge Packages and Packages.gz file
cat "previous/dists/${distribution}/jdk1.8/binary-amd64/Packages" >> "dists/${distribution}/jdk1.8/binary-amd64/Packages"
gzip -9c "dists/${distribution}/jdk1.8/binary-amd64/Packages" > "dists/${distribution}/jdk1.8/binary-amd64/Packages.gz"
# Merge Sources.gz file
gunzip "previous/dists/${distribution}/jdk1.8/source/Sources.gz"
gunzip "dists/${distribution}/jdk1.8/source/Sources.gz"
cat "previous/dists/${distribution}/jdk1.8/source/Sources" >> "dists/${distribution}/jdk1.8/source/Sources"
gzip -9c "dists/${distribution}/jdk1.8/source/Sources" > "dists/${distribution}/jdk1.8/source/Sources.gz"
rm -f "dists/${distribution}/jdk1.8/source/Sources"
# Update Release file
print_new_release_content "${distribution}" > "dists/${distribution}/Release.new"
mv "dists/${distribution}/Release.new" "dists/${distribution}/Release"
# Generate new signatures for Release file
rm -f "dists/${distribution}/InRelease" "dists/${distribution}/Release.gpg"
gpg --output "dists/${distribution}/InRelease" --clearsign "dists/${distribution}/Release"
gpg --output "dists/${distribution}/Release.gpg" --detach-sign "dists/${distribution}/Release"
}
# Create a debian package with version in package name and add it to the repo
function add_versioned_deb_pkg() {
local distribution="$1"
local deb_pkg_name="$2"
# Extract the original package
mkdir -p deb-old
dpkg-deb -R "${deb_pkg_name}" deb-old
# Get bazel version
bazel_version=$(grep "Version:" deb-old/DEBIAN/control | cut -d " " -f2)
bazel_version=${bazel_version/\~/}
# Generate new control file
mkdir -p deb-new/DEBIAN
sed "s/Package:\ bazel/Package:\ bazel-${bazel_version}/g" "deb-old/DEBIAN/control" > "deb-new/DEBIAN/control"
# Rename the actual Bazel binary to bazel-${bazel_version}
mkdir -p deb-new/usr/bin
cp "deb-old/usr/bin/bazel-real" "deb-new/usr/bin/bazel-${bazel_version}"
# Re-pack the debian package and add it to the repo
versioned_deb_pkg_name="bazel-${bazel_version}-versioned-package-amd64.deb"
chmod -R 0755 deb-new
dpkg-deb -b deb-new "${versioned_deb_pkg_name}"
reprepro -C jdk1.8 includedeb "${distribution}" "${versioned_deb_pkg_name}"
}
function create_apt_repository() {
mkdir conf
cat > conf/distributions <<EOF
Origin: Bazel Authors
Label: Bazel
Codename: stable
Architectures: amd64 source
Components: jdk1.8
Description: Bazel APT Repository
DebOverride: override.stable
DscOverride: override.stable
SignWith: ${APT_GPG_KEY_ID}
Origin: Bazel Authors
Label: Bazel
Codename: testing
Architectures: amd64 source
Components: jdk1.8
Description: Bazel APT Repository
DebOverride: override.testing
DscOverride: override.testing
SignWith: ${APT_GPG_KEY_ID}
EOF
cat > conf/options <<EOF
verbose
ask-passphrase
basedir .
EOF
# TODO(#2264): this is a quick workaround #2256, figure out a correct fix.
cat > conf/override.stable <<EOF
bazel Section contrib/devel
bazel Priority optional
EOF
cat > conf/override.testing <<EOF
bazel Section contrib/devel
bazel Priority optional
EOF
ensure_gpg_secret_key_imported
local distribution="$1"
local deb_pkg_name="$2"
local deb_dsc_name="$3"
debsign -k "${APT_GPG_KEY_ID}" "${deb_dsc_name}"
reprepro -C jdk1.8 includedeb "${distribution}" "${deb_pkg_name}"
reprepro -C jdk1.8 includedsc "${distribution}" "${deb_dsc_name}"
add_versioned_deb_pkg "${distribution}" "${deb_pkg_name}"
merge_previous_dists "${distribution}"
gsutil -m cp -r dists pool "gs://bazel-apt"
}
function release_to_apt() {
local artifact_dir="$1"
local release_name="$(get_release_name)"
local rc="$(get_release_candidate)"
if [ -n "${release_name}" ]; then
local release_label="$(get_full_release_name)"
local deb_pkg_name="${release_name}/bazel_${release_label}-linux-x86_64.deb"
local deb_dsc_name="${release_name}/bazel_${release_label}.dsc"
local deb_tar_name="${release_name}/bazel_${release_label}.tar.gz"
pushd "${artifact_dir}"
if [ -n "${rc}" ]; then
create_apt_repository testing "${deb_pkg_name}" "${deb_dsc_name}"
else
create_apt_repository stable "${deb_pkg_name}" "${deb_dsc_name}"
fi
popd
fi
}
# A wrapper around the release deployment methods.
function deploy_release() {
local release_label="$(get_full_release_name)"
local release_name="$(get_release_name)"
if [[ ! -d $1 ]]; then
echo "Usage: deploy_release ARTIFACT_DIR"
exit 1
fi
artifact_dir="$1"
if [[ -z $release_name ]]; then
echo "Could not get the release name - are you in a release branch directory?"
exit 1
fi
ensure_gpg_secret_key_imported
rm -f "${artifact_dir}"/*.{sha256,sig}
for file in "${artifact_dir}"/*; do
(cd "${artifact_dir}" && sha256sum "$(basename "${file}")" > "${file}.sha256")
gpg --no-tty --detach-sign -u "${APT_GPG_KEY_ID}" "${file}"
done
if [ "$(is_rolling_release)" -eq 0 ]; then
apt_working_dir="$(mktemp -d --tmpdir)"
echo "apt_working_dir = ${apt_working_dir}"
mkdir "${apt_working_dir}/${release_name}"
cp "${artifact_dir}/bazel_${release_label}-linux-x86_64.deb" "${apt_working_dir}/${release_name}"
cp "${artifact_dir}/bazel_${release_label}.dsc" "${apt_working_dir}/${release_name}"
cp "${artifact_dir}/bazel_${release_label}.tar.gz" "${apt_working_dir}/${release_name}"
release_to_apt "${apt_working_dir}"
fi
gcs_working_dir="$(mktemp -d --tmpdir)"
echo "gcs_working_dir = ${gcs_working_dir}"
cp "${artifact_dir}"/* "${gcs_working_dir}"
release_to_gcs "${gcs_working_dir}"
github_working_dir="$(mktemp -d --tmpdir)"
echo "github_working_dir = ${github_working_dir}"
cp "${artifact_dir}"/* "${github_working_dir}"
rm -f "${github_working_dir}/bazel_${release_label}"*.{dsc,tar.gz}{,.sha256,.sig}
release_to_github "${github_working_dir}"
}
|
bazelbuild/bazel
|
scripts/ci/build.sh
|
Shell
|
apache-2.0
| 15,655 |
#!/usr/bin/env bash
declare -x GITEA_TIME_FORMAT
[[ -z "${GITEA_TIME_FORMAT}" ]] && GITEA_TIME_FORMAT=""
declare -x GITEA_TIME_DEFAULT_UI_LOCATION
[[ -z "${GITEA_TIME_DEFAULT_UI_LOCATION}" ]] && GITEA_TIME_DEFAULT_UI_LOCATION=""
|
dockhippie/gitea
|
v1.12/overlay/etc/entrypoint.d/19-time.sh
|
Shell
|
mit
| 231 |
#!/bin/bash
WD=
function on_exit { [ -n "$WD" ] && /bin/rm -rf "$WD"; }
# Register the handler so the temporary work dir is removed on exit
trap on_exit EXIT
WD=$(mktemp -d "/tmp/planet-opensuse-crush.XXXXXXXX")
for d in website/images website/hackergotchi; do
pngcrush -q -d "$WD/" "$d"/*.png
for f in "$WD"/*.png; do
[ -e "$f" ] || continue
size_crushed=$(stat -c '%s' "$f")
size_orig=$(stat -c '%s' "$d/${f##*/}")
if [ "$size_crushed" -lt "$size_orig" ]; then
/bin/mv -v "$f" "$d/${f##*/}"
else
/bin/rm "$f"
fi
done
done
|
Gschlotter/planet.opensuse.org
|
crush.sh
|
Shell
|
gpl-2.0
| 524 |
#! /bin/sh
# Copyright (C) 2002-2013 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Make sure that installing subdirectory libtool libraries works.
# PR/300
required='cc libtoolize'
. test-init.sh
cat >> configure.ac << 'END'
AC_PROG_CC
AM_PROG_AR
AC_PROG_LIBTOOL
AC_OUTPUT
END
cat > Makefile.am << 'END'
lib_LTLIBRARIES = subdir/liba.la
subdir_liba_la_SOURCES = a.c
nobase_lib_LTLIBRARIES = subdir/libb.la
subdir_libb_la_SOURCES = a.c
END
cat > a.c << 'END'
int i = 3;
END
libtoolize
$ACLOCAL
$AUTOCONF
$AUTOMAKE --copy --add-missing
# We pass '--libdir' explicitly, to avoid spurious failures due to users
# or distributions possibly overriding '${libdir}' in their $CONFIG_SITE
# file (for example, defining it to '${prefix}/lib64' on 64-bit systems,
# as is the case with openSUSE 12.1). See automake bug#10426.
cwd=$(pwd) || fatal_ "getting current working directory"
./configure --prefix "$cwd/inst" --libdir "$cwd/inst/lib"
# A rule in the Makefile should create subdir.
test ! -e subdir
run_make -O
test -d subdir
grep 'liba.la .*-rpath .*lib' stdout
grep 'liba.la .*-rpath .*lib/subdir' stdout && exit 1
grep 'libb.la .*-rpath .*lib/subdir' stdout
test -f subdir/liba.la
test -f subdir/libb.la
run_make -E install
grep 'remember.*--finish' stderr && exit 1
test -f inst/lib/liba.la
test -f inst/lib/subdir/libb.la
$MAKE uninstall
test -f inst/lib/liba.la && exit 1
test -f inst/lib/subdir/libb.la && exit 1
$MAKE install-strip
test -f inst/lib/liba.la
test -f inst/lib/subdir/libb.la
:
|
DDTChen/CookieVLC
|
vlc/extras/tools/automake/t/pr300-ltlib.sh
|
Shell
|
gpl-2.0
| 2,121 |
# Set the different path for this activity
# This is sourced by runit.sh
path=$1
activity=chat
plugindir=$path/../boards/.libs
pythonplugindir=$path
resourcedir=$path/resources
section="/fun"
|
keshashah/GCompris
|
src/chat-activity/init_path.sh
|
Shell
|
gpl-2.0
| 193 |
#!/bin/sh
#
# Copyright (c) 2005 Junio C Hamano.
#
USAGE='[--interactive | -i] [-v] [--force-rebase | -f] [--no-ff] [--onto <newbase>] [<upstream>|--root] [<branch>] [--quiet | -q]'
LONG_USAGE='git-rebase replaces <branch> with a new branch of the
same name. When the --onto option is provided the new branch starts
out with a HEAD equal to <newbase>, otherwise it is equal to <upstream>
It then attempts to create a new commit for each commit from the original
<branch> that does not exist in the <upstream> branch.
It is possible that a merge failure will prevent this process from being
completely automatic. You will have to resolve any such merge failure
and run git rebase --continue. Another option is to bypass the commit
that caused the merge failure with git rebase --skip. To check out the
original <branch> and remove the .git/rebase-apply working files, use the
command git rebase --abort instead.
Note that if <branch> is not specified on the command line, the
currently checked out branch is used.
Example: git-rebase master~1 topic
A---B---C topic A'\''--B'\''--C'\'' topic
/ --> /
D---E---F---G master D---E---F---G master
'
SUBDIRECTORY_OK=Yes
OPTIONS_KEEPDASHDASH=
OPTIONS_SPEC="\
git rebase [-i] [options] [--onto <newbase>] [<upstream>] [<branch>]
git rebase [-i] [options] --onto <newbase> --root [<branch>]
git-rebase [-i] --continue | --abort | --skip
--
Available options are
v,verbose! display a diffstat of what changed upstream
q,quiet! be quiet. implies --no-stat
onto=! rebase onto given branch instead of upstream
p,preserve-merges! try to recreate merges instead of ignoring them
s,strategy=! use the given merge strategy
no-ff! cherry-pick all commits, even if unchanged
m,merge! use merging strategies to rebase
i,interactive! let the user edit the list of commits to rebase
k,keep-empty preserve empty commits during rebase
f,force-rebase! force rebase even if branch is up to date
X,strategy-option=! pass the argument through to the merge strategy
stat! display a diffstat of what changed upstream
n,no-stat! do not show diffstat of what changed upstream
verify allow pre-rebase hook to run
rerere-autoupdate allow rerere to update index with resolved conflicts
root! rebase all reachable commits up to the root(s)
autosquash move commits that begin with squash!/fixup! under -i
committer-date-is-author-date! passed to 'git am'
ignore-date! passed to 'git am'
whitespace=! passed to 'git apply'
ignore-whitespace! passed to 'git apply'
C=! passed to 'git apply'
Actions:
continue! continue
abort! abort and check out the original branch
skip! skip current patch and continue
"
. git-sh-setup
set_reflog_action rebase
require_work_tree_exists
cd_to_toplevel
LF='
'
ok_to_skip_pre_rebase=
resolvemsg="
When you have resolved this problem run \"git rebase --continue\".
If you would prefer to skip this patch, instead run \"git rebase --skip\".
To check out the original branch and stop rebasing run \"git rebase --abort\".
"
unset onto
strategy=
strategy_opts=
do_merge=
merge_dir="$GIT_DIR"/rebase-merge
apply_dir="$GIT_DIR"/rebase-apply
verbose=
diffstat=
test "$(git config --bool rebase.stat)" = true && diffstat=t
git_am_opt=
rebase_root=
force_rebase=
allow_rerere_autoupdate=
# Non-empty if a rebase was in progress when 'git rebase' was invoked
in_progress=
# One of {am, merge, interactive}
type=
# One of {"$GIT_DIR"/rebase-apply, "$GIT_DIR"/rebase-merge}
state_dir=
# One of {'', continue, skip, abort}, as parsed from command line
action=
preserve_merges=
autosquash=
keep_empty=
test "$(git config --bool rebase.autosquash)" = "true" && autosquash=t
read_basic_state () {
head_name=$(cat "$state_dir"/head-name) &&
onto=$(cat "$state_dir"/onto) &&
# We always write to orig-head, but interactive rebase used to write to
# head. Fall back to reading from head to cover for the case that the
# user upgraded git with an ongoing interactive rebase.
if test -f "$state_dir"/orig-head
then
orig_head=$(cat "$state_dir"/orig-head)
else
orig_head=$(cat "$state_dir"/head)
fi &&
GIT_QUIET=$(cat "$state_dir"/quiet) &&
test -f "$state_dir"/verbose && verbose=t
test -f "$state_dir"/strategy && strategy="$(cat "$state_dir"/strategy)"
test -f "$state_dir"/strategy_opts &&
strategy_opts="$(cat "$state_dir"/strategy_opts)"
test -f "$state_dir"/allow_rerere_autoupdate &&
allow_rerere_autoupdate="$(cat "$state_dir"/allow_rerere_autoupdate)"
}
write_basic_state () {
echo "$head_name" > "$state_dir"/head-name &&
echo "$onto" > "$state_dir"/onto &&
echo "$orig_head" > "$state_dir"/orig-head &&
echo "$GIT_QUIET" > "$state_dir"/quiet &&
test t = "$verbose" && : > "$state_dir"/verbose
test -n "$strategy" && echo "$strategy" > "$state_dir"/strategy
test -n "$strategy_opts" && echo "$strategy_opts" > \
"$state_dir"/strategy_opts
test -n "$allow_rerere_autoupdate" && echo "$allow_rerere_autoupdate" > \
"$state_dir"/allow_rerere_autoupdate
}
output () {
case "$verbose" in
'')
output=$("$@" 2>&1 )
status=$?
test $status != 0 && printf "%s\n" "$output"
return $status
;;
*)
"$@"
;;
esac
}
move_to_original_branch () {
case "$head_name" in
refs/*)
message="rebase finished: $head_name onto $onto"
git update-ref -m "$message" \
$head_name $(git rev-parse HEAD) $orig_head &&
git symbolic-ref \
-m "rebase finished: returning to $head_name" \
HEAD $head_name ||
die "Could not move back to $head_name"
;;
esac
}
run_specific_rebase () {
if [ "$interactive_rebase" = implied ]; then
GIT_EDITOR=:
export GIT_EDITOR
autosquash=
fi
. git-rebase--$type
}
run_pre_rebase_hook () {
if test -z "$ok_to_skip_pre_rebase" &&
test -x "$GIT_DIR/hooks/pre-rebase"
then
"$GIT_DIR/hooks/pre-rebase" ${1+"$@"} ||
die "The pre-rebase hook refused to rebase."
fi
}
test -f "$apply_dir"/applying &&
die 'It looks like git-am is in progress. Cannot rebase.'
if test -d "$apply_dir"
then
type=am
state_dir="$apply_dir"
elif test -d "$merge_dir"
then
if test -f "$merge_dir"/interactive
then
type=interactive
interactive_rebase=explicit
else
type=merge
fi
state_dir="$merge_dir"
fi
test -n "$type" && in_progress=t
total_argc=$#
while test $# != 0
do
case "$1" in
--no-verify)
ok_to_skip_pre_rebase=yes
;;
--verify)
ok_to_skip_pre_rebase=
;;
--continue|--skip|--abort)
test $total_argc -eq 2 || usage
action=${1##--}
;;
--onto)
test 2 -le "$#" || usage
onto="$2"
shift
;;
-i)
interactive_rebase=explicit
;;
-k)
keep_empty=yes
;;
-p)
preserve_merges=t
test -z "$interactive_rebase" && interactive_rebase=implied
;;
--autosquash)
autosquash=t
;;
--no-autosquash)
autosquash=
;;
-M|-m)
do_merge=t
;;
-X)
shift
strategy_opts="$strategy_opts $(git rev-parse --sq-quote "--$1")"
do_merge=t
test -z "$strategy" && strategy=recursive
;;
-s)
shift
strategy="$1"
do_merge=t
;;
-n)
diffstat=
;;
--stat)
diffstat=t
;;
-v)
verbose=t
diffstat=t
GIT_QUIET=
;;
-q)
GIT_QUIET=t
git_am_opt="$git_am_opt -q"
verbose=
diffstat=
;;
--whitespace)
shift
git_am_opt="$git_am_opt --whitespace=$1"
case "$1" in
fix|strip)
force_rebase=t
;;
esac
;;
--ignore-whitespace)
git_am_opt="$git_am_opt $1"
;;
--committer-date-is-author-date|--ignore-date)
git_am_opt="$git_am_opt $1"
force_rebase=t
;;
-C)
shift
git_am_opt="$git_am_opt -C$1"
;;
--root)
rebase_root=t
;;
-f|--no-ff)
force_rebase=t
;;
--rerere-autoupdate|--no-rerere-autoupdate)
allow_rerere_autoupdate="$1"
;;
--)
shift
break
;;
esac
shift
done
test $# -gt 2 && usage
if test -n "$action"
then
test -z "$in_progress" && die "No rebase in progress?"
# Only interactive rebase uses detailed reflog messages
if test "$type" = interactive && test "$GIT_REFLOG_ACTION" = rebase
then
GIT_REFLOG_ACTION="rebase -i ($action)"
export GIT_REFLOG_ACTION
fi
fi
case "$action" in
continue)
# Sanity check
git rev-parse --verify HEAD >/dev/null ||
die "Cannot read HEAD"
git update-index --ignore-submodules --refresh &&
git diff-files --quiet --ignore-submodules || {
echo "You must edit all merge conflicts and then"
echo "mark them as resolved using git add"
exit 1
}
read_basic_state
run_specific_rebase
;;
skip)
output git reset --hard HEAD || exit $?
read_basic_state
run_specific_rebase
;;
abort)
git rerere clear
read_basic_state
case "$head_name" in
refs/*)
git symbolic-ref -m "rebase: aborting" HEAD $head_name ||
die "Could not move back to $head_name"
;;
esac
output git reset --hard $orig_head
rm -r "$state_dir"
exit
;;
esac
# Make sure no rebase is in progress
if test -n "$in_progress"
then
die '
It seems that there is already a '"${state_dir##*/}"' directory, and
I wonder if you are in the middle of another rebase. If that is the
case, please try
git rebase (--continue | --abort | --skip)
If that is not the case, please
rm -fr '"$state_dir"'
and run me again. I am stopping in case you still have something
valuable there.'
fi
if test -n "$interactive_rebase"
then
type=interactive
state_dir="$merge_dir"
elif test -n "$do_merge"
then
type=merge
state_dir="$merge_dir"
else
type=am
state_dir="$apply_dir"
fi
if test -z "$rebase_root"
then
case "$#" in
0)
if ! upstream_name=$(git rev-parse --symbolic-full-name \
--verify -q @{upstream} 2>/dev/null)
then
. git-parse-remote
error_on_missing_default_upstream "rebase" "rebase" \
"against" "git rebase <branch>"
fi
;;
*) upstream_name="$1"
shift
;;
esac
upstream=`git rev-parse --verify "${upstream_name}^0"` ||
die "invalid upstream $upstream_name"
upstream_arg="$upstream_name"
else
test -z "$onto" && die "You must specify --onto when using --root"
unset upstream_name
unset upstream
upstream_arg=--root
fi
# Make sure the branch to rebase onto is valid.
onto_name=${onto-"$upstream_name"}
case "$onto_name" in
*...*)
if left=${onto_name%...*} right=${onto_name#*...} &&
onto=$(git merge-base --all ${left:-HEAD} ${right:-HEAD})
then
case "$onto" in
?*"$LF"?*)
die "$onto_name: there are more than one merge bases"
;;
'')
die "$onto_name: there is no merge base"
;;
esac
else
die "$onto_name: there is no merge base"
fi
;;
*)
onto=$(git rev-parse --verify "${onto_name}^0") ||
die "Does not point to a valid commit: $onto_name"
;;
esac
# If the branch to rebase is given, that is the branch we will rebase
# $branch_name -- branch being rebased, or HEAD (already detached)
# $orig_head -- commit object name of tip of the branch before rebasing
# $head_name -- refs/heads/<that-branch> or "detached HEAD"
switch_to=
case "$#" in
1)
# Is it "rebase other $branchname" or "rebase other $commit"?
branch_name="$1"
switch_to="$1"
if git show-ref --verify --quiet -- "refs/heads/$1" &&
orig_head=$(git rev-parse -q --verify "refs/heads/$1")
then
head_name="refs/heads/$1"
elif orig_head=$(git rev-parse -q --verify "$1")
then
head_name="detached HEAD"
else
die "fatal: no such branch: $1"
fi
;;
*)
# Do not need to switch branches, we are already on it.
if branch_name=`git symbolic-ref -q HEAD`
then
head_name=$branch_name
branch_name=`expr "z$branch_name" : 'zrefs/heads/\(.*\)'`
else
head_name="detached HEAD"
branch_name=HEAD ;# detached
fi
orig_head=$(git rev-parse --verify "${branch_name}^0") || exit
;;
esac
require_clean_work_tree "rebase" "Please commit or stash them."
# Now we are rebasing commits $upstream..$orig_head (or with --root,
# everything leading up to $orig_head) on top of $onto
# Check if we are already based on $onto with linear history,
# but this should be done only when upstream and onto are the same
# and if this is not an interactive rebase.
mb=$(git merge-base "$onto" "$orig_head")
if test "$type" != interactive && test "$upstream" = "$onto" &&
test "$mb" = "$onto" &&
# linear history?
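# (rev-list --parents prints "<commit> <parent>..." per line; a line carrying
# two or more parents is a merge, which the " .* " grep detects)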
! (git rev-list --parents "$onto".."$orig_head" | sane_grep " .* ") > /dev/null
then
if test -z "$force_rebase"
then
# Lazily switch to the target branch if needed...
test -z "$switch_to" || git checkout "$switch_to" --
say "Current branch $branch_name is up to date."
exit 0
else
say "Current branch $branch_name is up to date, rebase forced."
fi
fi
# If a hook exists, give it a chance to interrupt
run_pre_rebase_hook "$upstream_arg" "$@"
if test -n "$diffstat"
then
if test -n "$verbose"
then
echo "Changes from $mb to $onto:"
fi
# We want color (if set), but no pager
GIT_PAGER='' git diff --stat --summary "$mb" "$onto"
fi
test "$type" = interactive && run_specific_rebase
# Detach HEAD and reset the tree
say "First, rewinding head to replay your work on top of it..."
git checkout -q "$onto^0" || die "could not detach HEAD"
git update-ref ORIG_HEAD $orig_head
# If the $onto is a proper descendant of the tip of the branch, then
# we just fast-forwarded.
if test "$mb" = "$orig_head"
then
say "Fast-forwarded $branch_name to $onto_name."
move_to_original_branch
exit 0
fi
if test -n "$rebase_root"
then
revisions="$onto..$orig_head"
else
revisions="$upstream..$orig_head"
fi
run_specific_rebase
|
TextusData/Mover
|
thirdparty/git-1.7.11.3/git-rebase.sh
|
Shell
|
gpl-3.0
| 13,390 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
set -ex
source $TRAVIS_BUILD_DIR/ci/travis_env_common.sh
pushd $ARROW_JS_DIR
npm run lint
npm run build
# run the non-snapshot unit tests
npm test
popd
|
dremio/arrow
|
ci/travis_script_js.sh
|
Shell
|
apache-2.0
| 963 |
#Aqueduct - Compliance Remediation Content
#Copyright (C) 2011,2012 Vincent C. Passaro ([email protected])
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor,
#Boston, MA 02110-1301, USA.
#!/bin/bash
######################################################################
#By Tummy a.k.a Vincent C. Passaro #
#Vincent[.]Passaro[@]gmail[.]com #
#www.vincentpassaro.com #
######################################################################
#_____________________________________________________________________
#| Version | Change Information | Author | Date |
#|__________|_______________________|____________________|____________|
#| 1.0 | Initial Script | Vincent C. Passaro | 20-oct-2011|
#| | Creation | | |
#|__________|_______________________|____________________|____________|
#######################DISA INFORMATION###############################
#Group ID (Vulid): V-4087
#Group Title: Local Initialization Files World Writable Programs
#Rule ID: SV-4087r6_rule
#Severity: CAT II
#Rule Version (STIG-ID): GEN001940
#Rule Title: User start-up files must not execute world-writable programs.
#
#Vulnerability Discussion: If start-up files execute world-writable
#programs, especially in unprotected directories, they could be
#maliciously modified to become trojans that destroy user files or
#otherwise compromise the system at the user, or higher, level. If the
#system is compromised at the user level, it is much easier to eventually
#compromise the system at the root and network level.
#
#Responsibility: System Administrator
#IAControls: DCSW-1
#
#Check Content:
#Check local initialization files for any executed world-writable
#programs or scripts and scripts executing from world writable
#directories.
#
#Procedure:
#For each home directory on the system make a list of files
#referenced within any local initialization script.
#Show the mode for each file and it's parent directory.
#
# FILES=".login .cshrc .logout .profile .bash_profile .bashrc
#.bash_logout .env .dtprofile .dispatch .emacs .exrc";
#
# for HOMEDIR in `cut -d: -f6 /etc/passwd|sort|uniq;do for INIFILE in
#$FILES;do REFLIST=`egrep " [\"~]?/" ${HOMEDIR}/${INIFILE} 2>null|sed
#"s/.*\([~ \"]\/[\.0-9A-Za-z_\/\-]*\).*/\1/"`;for REFFILE in $REFLIST;do
#FULLREF=`echo $REFFILE|sed "s:\~:${HOMEDIR}:g"|sed "s:^\s*::g"`;dirname
#$FULLREF|xargs stat -c "dir:%a:%n";stat -c "file:%:%n" $FULLREF;done;done;
#done|sort|uniq
#
#If any local initialization file executes a world-writable program or
#script or a script from a world -writable directory, this is a finding.
#
#Fix Text: Remove the world-writable permission of files referenced by
#local initialization scripts, or remove the references to these files
#in the local initialization scripts.
#######################DISA INFORMATION###############################
#DISA'S SUGGESTION IS BROKEN (per the standard), DON'T USE!! Fixed syntax is referenced
#as DISAJUNK
#Global Variables#
PDI=GEN001940
FILES=".login .cshrc .logout .profile .bash_profile .bashrc .bash_logout .env .dtprofile .dispatch .emacs .exrc"
DISAJUNK=$( for HOMEDIR in `cut -d: -f6 /etc/passwd | sort | uniq | grep -v "^/$"`
do
for INIFILE in $FILES
do REFLIST=$( egrep " [\"~]?/" ${HOMEDIR}/${INIFILE} 2>/dev/null|sed "s/.*\([~ \"]\/[\.0-9A-Za-z_\/\-]*\).*/\1/")
for REFFILE in $REFLIST
do FULLREF=$( echo $REFFILE | sed "s:\~:${HOMEDIR}:g" | sed "s:^\s*::g" )
dirname $FULLREF|xargs stat -c "dir:%a:%n" | cut -d ":" -f 3
stat -c "file:%:%n" $FULLREF | cut -d ":" -f 3
done | sort | uniq
done
done
)
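# BADFILE keeps only the world-writable (o+w) directories from that list;
# the lockdown loop below strips the world-write bit from each entry.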
BADFILE=$( for file in $DISAJUNK
do
#echo $file
find $file -maxdepth 0 -perm -002 -type d | grep $file
done | sort | uniq )
#Start-Lockdown
for line in $BADFILE
do
chmod o-w $line
done
|
quark-pat/CLIP
|
packages/aqueduct/aqueduct/compliance/Bash/STIG/rhel-5-beta/prod/GEN001940.sh
|
Shell
|
apache-2.0
| 4,563 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# kubernetes-e2e-{gce, gke, gke-ci} jobs: This script is triggered by
# the kubernetes-build job, or runs every half hour. We abort this job
# if it takes more than 75m. As of initial commit, it typically runs
# in about half an hour.
#
# The "Workspace Cleanup Plugin" is installed and in use for this job,
# so the ${WORKSPACE} directory (the current directory) is currently
# empty.
set -o errexit
set -o nounset
set -o pipefail
set -o xtrace
# Join all args with |
# Example: join_regex_allow_empty a b "c d" e => a|b|c d|e
function join_regex_allow_empty() {
local IFS="|"
echo "$*"
}
# Join all args with |, but in case of an empty result print "EMPTY\sSET" instead.
# Example: join_regex_no_empty a b "c d" e => a|b|c d|e
# join_regex_no_empty => EMPTY\sSET
function join_regex_no_empty() {
local IFS="|"
if [ -z "$*" ]; then
echo "EMPTY\sSET"
else
echo "$*"
fi
}
echo "--------------------------------------------------------------------------------"
echo "Initial Environment:"
printenv | sort
echo "--------------------------------------------------------------------------------"
if [[ "${CIRCLECI:-}" == "true" ]]; then
JOB_NAME="circleci-${CIRCLE_PROJECT_USERNAME}-${CIRCLE_PROJECT_REPONAME}"
BUILD_NUMBER=${CIRCLE_BUILD_NUM}
WORKSPACE=`pwd`
else
# Jenkins?
export HOME=${WORKSPACE} # Nothing should want Jenkins $HOME
fi
# Additional parameters that are passed to hack/e2e.go
E2E_OPT=${E2E_OPT:-""}
# Set environment variables shared for all of the GCE Jenkins projects.
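# (Each ": ${VAR:=value}" line below assigns VAR only if it is unset or empty,
# so values already exported by the Jenkins job take precedence.)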
if [[ ${JOB_NAME} =~ ^kubernetes-.*-gce ]]; then
KUBERNETES_PROVIDER="gce"
: ${E2E_MIN_STARTUP_PODS:="1"}
: ${E2E_ZONE:="us-central1-f"}
: ${MASTER_SIZE:="n1-standard-2"}
: ${MINION_SIZE:="n1-standard-2"}
: ${NUM_MINIONS:="3"}
fi
if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
if [[ "${PERFORMANCE:-}" == "true" ]]; then
: ${MASTER_SIZE:="m3.xlarge"}
: ${NUM_MINIONS:="100"}
: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Performance\ssuite\]"}
else
: ${MASTER_SIZE:="t2.small"}
: ${NUM_MINIONS:="2"}
fi
fi
# Specialized tests which should be skipped by default for projects.
GCE_DEFAULT_SKIP_TESTS=(
"Skipped"
"Reboot"
"Restart"
"Example"
)
# The following tests are known to be flaky, and are thus run only in their own
# -flaky- build variants.
GCE_FLAKY_TESTS=(
"ResourceUsage"
)
# Tests which are not able to be run in parallel.
GCE_PARALLEL_SKIP_TESTS=(
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}}
"Autoscaling"
"Etcd"
"NetworkingNew"
"Nodes\sNetwork"
"Nodes\sResize"
"MaxPods"
"ResourceUsage"
"SchedulerPredicates"
"Services.*restarting"
"Shell.*services"
)
# Tests which are known to be flaky when run in parallel.
GCE_PARALLEL_FLAKY_TESTS=(
"Elasticsearch"
"PD"
"ServiceAccounts"
"Service\sendpoints\slatency"
"Services.*change\sthe\stype"
"Services.*functioning\sexternal\sload\sbalancer"
"Services.*identically\snamed"
"Services.*release.*load\sbalancer"
)
# Tests that should not run on soak cluster.
GCE_SOAK_CONTINUOUS_SKIP_TESTS=(
"Autoscaling"
"Density.*30\spods"
"Elasticsearch"
"Etcd.*SIGKILL"
"external\sload\sbalancer"
"identically\snamed\sservices"
"network\spartition"
"Reboot"
"Resize"
"Restart"
"Services.*Type\sgoes\sfrom"
"Services.*nodeport\ssettings"
"Skipped"
)
GCE_RELEASE_SKIP_TESTS=(
"Autoscaling"
)
# Define environment variables based on the Jenkins project name.
case ${JOB_NAME} in
# Runs all non-flaky tests on GCE, sequentially.
kubernetes-e2e-gce)
: ${E2E_CLUSTER_NAME:="jenkins-gce-e2e"}
: ${E2E_DOWN:="false"}
: ${E2E_NETWORK:="e2e-gce"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
)"}
: ${KUBE_GCE_INSTANCE_PREFIX="e2e-gce"}
: ${PROJECT:="k8s-jkns-e2e-gce"}
# Override GCE default for cluster size autoscaling purposes.
ENABLE_CLUSTER_MONITORING="googleinfluxdb"
;;
# Runs only the examples tests on GCE.
kubernetes-e2e-gce-examples)
: ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-examples"}
: ${E2E_DOWN:="false"}
: ${E2E_NETWORK:="e2e-examples"}
: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Example"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-examples"}
: ${PROJECT:="kubernetes-jenkins"}
;;
# Runs the flaky tests on GCE, sequentially.
kubernetes-e2e-gce-flaky)
: ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-flaky"}
: ${E2E_DOWN:="false"}
: ${E2E_NETWORK:="e2e-flaky"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
) --ginkgo.focus=$(join_regex_no_empty \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
)"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-flaky"}
: ${PROJECT:="k8s-jkns-e2e-gce-flaky"}
;;
# Runs all non-flaky tests on GCE in parallel.
kubernetes-e2e-gce-parallel)
: ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-parallel"}
: ${E2E_NETWORK:="e2e-parallel"}
: ${GINKGO_PARALLEL:="y"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
${GCE_PARALLEL_FLAKY_TESTS[@]:+${GCE_PARALLEL_FLAKY_TESTS[@]}} \
)"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-test-parallel"}
: ${PROJECT:="kubernetes-jenkins"}
# Override GCE defaults.
NUM_MINIONS="6"
;;
# Runs the flaky tests on GCE in parallel.
kubernetes-e2e-gce-parallel-flaky)
: ${E2E_CLUSTER_NAME:="parallel-flaky"}
: ${E2E_NETWORK:="e2e-parallel-flaky"}
: ${GINKGO_PARALLEL:="y"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
) --ginkgo.focus=$(join_regex_no_empty \
${GCE_PARALLEL_FLAKY_TESTS[@]:+${GCE_PARALLEL_FLAKY_TESTS[@]}} \
)"}
: ${KUBE_GCE_INSTANCE_PREFIX:="parallel-flaky"}
: ${PROJECT:="k8s-jkns-e2e-gce-prl-flaky"}
# Override GCE defaults.
NUM_MINIONS="4"
;;
# Runs only the reboot tests on GCE.
kubernetes-e2e-gce-reboot)
: ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-reboot"}
: ${E2E_DOWN:="false"}
: ${E2E_NETWORK:="e2e-reboot"}
: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Reboot"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-reboot"}
: ${PROJECT:="kubernetes-jenkins"}
;;
# Runs the performance/scalability tests on GCE. A larger cluster is used.
kubernetes-e2e-gce-scalability)
: ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-scalability"}
: ${E2E_NETWORK:="e2e-scalability"}
: ${GINKGO_TEST_ARGS:="--ginkgo.focus=Performance\ssuite"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-scalability"}
: ${PROJECT:="kubernetes-jenkins"}
# Override GCE defaults.
MASTER_SIZE="n1-standard-4"
MINION_SIZE="n1-standard-2"
MINION_DISK_SIZE="50GB"
NUM_MINIONS="100"
;;
# Runs tests on GCE soak cluster.
kubernetes-soak-continuous-e2e-gce)
: ${E2E_CLUSTER_NAME:="gce-soak-weekly"}
: ${E2E_DOWN:="false"}
: ${E2E_NETWORK:="gce-soak-weekly"}
: ${E2E_UP:="false"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_SOAK_CONTINUOUS_SKIP_TESTS[@]:+${GCE_SOAK_CONTINUOUS_SKIP_TESTS[@]}} \
)"}
: ${KUBE_GCE_INSTANCE_PREFIX:="gce-soak-weekly"}
: ${PROJECT:="kubernetes-jenkins"}
;;
# Runs a subset of tests on GCE in parallel. Run against all pending PRs.
kubernetes-pull-build-test-e2e-gce)
: ${E2E_CLUSTER_NAME:="jenkins-pull-gce-e2e-${EXECUTOR_NUMBER}"}
: ${E2E_NETWORK:="pull-e2e-parallel-${EXECUTOR_NUMBER}"}
: ${GINKGO_PARALLEL:="y"}
# This list should match the list in kubernetes-e2e-gce-parallel. It
# currently also excludes a slow namespace test.
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
${GCE_PARALLEL_FLAKY_TESTS[@]:+${GCE_PARALLEL_FLAKY_TESTS[@]}} \
)"}
: ${KUBE_GCE_INSTANCE_PREFIX:="pull-e2e-${EXECUTOR_NUMBER}"}
: ${KUBE_GCS_STAGING_PATH_SUFFIX:="-${EXECUTOR_NUMBER}"}
: ${PROJECT:="kubernetes-jenkins-pull"}
# Override GCE defaults.
MASTER_SIZE="n1-standard-1"
MINION_SIZE="n1-standard-1"
NUM_MINIONS="2"
;;
# Runs non-flaky tests on GCE on the release-latest branch,
# sequentially. As a reminder, if you need to change the skip list
# or flaky test list on the release branch, you'll need to propose a
# pull request directly to the release branch itself.
kubernetes-e2e-gce-release)
: ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-release"}
: ${E2E_DOWN:="false"}
: ${E2E_NETWORK:="e2e-gce-release"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_RELEASE_SKIP_TESTS[@]:+${GCE_RELEASE_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
)"}
: ${KUBE_GCE_INSTANCE_PREFIX="e2e-gce"}
: ${PROJECT:="k8s-jkns-e2e-gce-release"}
;;
esac
# AWS variables
export KUBE_AWS_INSTANCE_PREFIX=${E2E_CLUSTER_NAME}
export KUBE_AWS_ZONE=${E2E_ZONE}
# GCE variables
export INSTANCE_PREFIX=${E2E_CLUSTER_NAME}
export KUBE_GCE_ZONE=${E2E_ZONE}
export KUBE_GCE_NETWORK=${E2E_NETWORK}
export KUBE_GCE_INSTANCE_PREFIX=${KUBE_GCE_INSTANCE_PREFIX:-}
export KUBE_GCS_STAGING_PATH_SUFFIX=${KUBE_GCS_STAGING_PATH_SUFFIX:-}
# GKE variables
export CLUSTER_NAME=${E2E_CLUSTER_NAME}
export ZONE=${E2E_ZONE}
export KUBE_GKE_NETWORK=${E2E_NETWORK}
# Shared cluster variables
export E2E_MIN_STARTUP_PODS=${E2E_MIN_STARTUP_PODS:-}
export KUBE_ENABLE_CLUSTER_MONITORING=${ENABLE_CLUSTER_MONITORING:-}
export MASTER_SIZE=${MASTER_SIZE:-}
export MINION_SIZE=${MINION_SIZE:-}
export NUM_MINIONS=${NUM_MINIONS:-}
export PROJECT=${PROJECT:-}
export PATH=${PATH}:/usr/local/go/bin
export KUBE_SKIP_CONFIRMATIONS=y
# E2E Control Variables
export E2E_UP="${E2E_UP:-true}"
export E2E_TEST="${E2E_TEST:-true}"
export E2E_DOWN="${E2E_DOWN:-true}"
# Used by hack/ginkgo-e2e.sh to enable ginkgo's parallel test runner.
export GINKGO_PARALLEL=${GINKGO_PARALLEL:-}
echo "--------------------------------------------------------------------------------"
echo "Test Environment:"
printenv | sort
echo "--------------------------------------------------------------------------------"
# We get the Kubernetes tarballs on either cluster creation or when we want to
# replace existing ones in a multi-step job (e.g. a cluster upgrade).
if [[ "${E2E_UP,,}" == "true" || "${JENKINS_FORCE_GET_TARS:-}" =~ ^[yY]$ ]]; then
if [[ ${KUBE_RUN_FROM_OUTPUT:-} =~ ^[yY]$ ]]; then
echo "Found KUBE_RUN_FROM_OUTPUT=y; will use binaries from _output"
cp _output/release-tars/kubernetes*.tar.gz .
else
echo "Pulling binaries from GCS"
# In a multi-step job, clean up just the kubernetes build files.
# Otherwise, we want a completely empty directory.
if [[ "${JENKINS_FORCE_GET_TARS:-}" =~ ^[yY]$ ]]; then
rm -rf kubernetes*
elif [[ $(find . | wc -l) != 1 ]]; then
echo $PWD not empty, bailing!
exit 1
fi
# Tell kube-up.sh to skip the update, it doesn't lock. An internal
# gcloud bug can cause racing component updates to stomp on each
# other.
export KUBE_SKIP_UPDATE=y
sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update -q" || true
sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update preview -q" || true
sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update alpha -q" || true
sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update beta -q" || true
if [[ ! -z ${JENKINS_EXPLICIT_VERSION:-} ]]; then
# Use an explicit pinned version like "ci/v0.10.0-101-g6c814c4" or
# "release/v0.19.1"
IFS='/' read -a varr <<< "${JENKINS_EXPLICIT_VERSION}"
bucket="${varr[0]}"
githash="${varr[1]}"
echo "$bucket / $githash"
elif [[ ${JENKINS_USE_SERVER_VERSION:-} =~ ^[yY]$ ]]; then
# for GKE we can use server default version.
bucket="release"
msg=$(gcloud ${CMD_GROUP} container get-server-config --project=${PROJECT} --zone=${ZONE} | grep defaultClusterVersion)
# msg will look like "defaultClusterVersion: 1.0.1". Strip
# everything up to and including ": "
githash="v${msg##*: }"
else
# The "ci" bucket is for builds like "v0.15.0-468-gfa648c1"
bucket="ci"
# The "latest" version picks the most recent "ci" or "release" build.
version_file="latest"
if [[ ${JENKINS_USE_RELEASE_TARS:-} =~ ^[yY]$ ]]; then
# The "release" bucket is for builds like "v0.15.0"
bucket="release"
if [[ ${JENKINS_USE_STABLE:-} =~ ^[yY]$ ]]; then
# The "stable" version picks the most recent "release" build.
version_file="stable"
fi
fi
githash=$(gsutil cat gs://kubernetes-release/${bucket}/${version_file}.txt)
fi
# At this point, we want to have the following vars set:
# - bucket
# - githash
gsutil -m cp gs://kubernetes-release/${bucket}/${githash}/kubernetes.tar.gz gs://kubernetes-release/${bucket}/${githash}/kubernetes-test.tar.gz .
fi
if [[ ! "${CIRCLECI:-}" == "true" ]]; then
# Copy GCE keys so we don't keep cycling them.
# To set this up, you must know the <project>, <zone>, and <instance>
# on which your jenkins jobs are running. Then do:
#
# # SSH from your computer into the instance.
# $ gcloud compute ssh --project="<prj>" ssh --zone="<zone>" <instance>
#
# # Generate a key by ssh'ing from the instance into itself, then exit.
# $ gcloud compute ssh --project="<prj>" ssh --zone="<zone>" <instance>
# $ ^D
#
# # Copy the keys to the desired location (e.g. /var/lib/jenkins/gce_keys/).
# $ sudo mkdir -p /var/lib/jenkins/gce_keys/
# $ sudo cp ~/.ssh/google_compute_engine /var/lib/jenkins/gce_keys/
# $ sudo cp ~/.ssh/google_compute_engine.pub /var/lib/jenkins/gce_keys/
#
# # Move the permissions for the keys to Jenkins.
# $ sudo chown -R jenkins /var/lib/jenkins/gce_keys/
# $ sudo chgrp -R jenkins /var/lib/jenkins/gce_keys/
if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
echo "Skipping SSH key copying for AWS"
else
mkdir -p ${WORKSPACE}/.ssh/
cp /var/lib/jenkins/gce_keys/google_compute_engine ${WORKSPACE}/.ssh/
cp /var/lib/jenkins/gce_keys/google_compute_engine.pub ${WORKSPACE}/.ssh/
fi
fi
md5sum kubernetes*.tar.gz
tar -xzf kubernetes.tar.gz
tar -xzf kubernetes-test.tar.gz
# Set by GKE-CI to change the CLUSTER_API_VERSION to the git version
if [[ ! -z ${E2E_SET_CLUSTER_API_VERSION:-} ]]; then
export CLUSTER_API_VERSION=$(echo ${githash} | cut -c 2-)
elif [[ ${JENKINS_USE_RELEASE_TARS:-} =~ ^[yY]$ ]]; then
release=$(gsutil cat gs://kubernetes-release/release/${version_file}.txt | cut -c 2-)
export CLUSTER_API_VERSION=${release}
fi
fi
cd kubernetes
# Have cmd/e2e run by goe2e.sh generate JUnit report in ${WORKSPACE}/junit*.xml
ARTIFACTS=${WORKSPACE}/_artifacts
mkdir -p ${ARTIFACTS}
export E2E_REPORT_DIR=${ARTIFACTS}
### Set up ###
if [[ "${E2E_UP,,}" == "true" ]]; then
go run ./hack/e2e.go ${E2E_OPT} -v --down
go run ./hack/e2e.go ${E2E_OPT} -v --up
go run ./hack/e2e.go -v --ctl="version --match-server-version=false"
fi
### Run tests ###
# Jenkins will look at the junit*.xml files for test failures, so don't exit
# with a nonzero error code if it was only tests that failed.
if [[ "${E2E_TEST,,}" == "true" ]]; then
go run ./hack/e2e.go ${E2E_OPT} -v --test --test_args="${GINKGO_TEST_ARGS}" && exitcode=0 || exitcode=$?
if [[ "${E2E_PUBLISH_GREEN_VERSION:-}" == "true" && ${exitcode} == 0 && -n ${githash:-} ]]; then
echo "publish githash to ci/latest-green.txt: ${githash}"
echo "${githash}" > ${WORKSPACE}/githash.txt
gsutil cp ${WORKSPACE}/githash.txt gs://kubernetes-release/ci/latest-green.txt
fi
fi
# TODO(zml): We have a bunch of legacy Jenkins configs that are
# expecting junit*.xml to be in ${WORKSPACE} root and it's Friday
# afternoon, so just put the junit report where it's expected.
# If link already exists, non-zero return code should not cause build to fail.
for junit in ${ARTIFACTS}/junit*.xml; do
ln -s -f ${junit} ${WORKSPACE} || true
done
### Clean up ###
if [[ "${E2E_DOWN,,}" == "true" ]]; then
# Sleep before deleting the cluster to give the controller manager time to
# delete any cloudprovider resources still around from the last test.
# This is calibrated to allow enough time for 3 attempts to delete the
# resources. Each attempt is allocated 5 seconds for requests to the
# cloudprovider plus the processingRetryInterval from servicecontroller.go
# for the wait between attempts.
sleep 30
go run ./hack/e2e.go ${E2E_OPT} -v --down
fi
|
squaremo/kubernetes
|
hack/jenkins/e2e.sh
|
Shell
|
apache-2.0
| 18,537 |
#!/bin/bash
pwd
./abgr.exe
./argb.exe
./bgra.exe
./rgba.exe
./cmyk.exe
|
dmilos/color
|
example/less-than-1k/format/AAA2/run.sh
|
Shell
|
apache-2.0
| 72 |
#!/bin/bash
function use(){
echo "Usage: $0 [sim|phone] paintown-dir"
}
# Check if arguments are given and the paintown directory exists
if [ -n "$2" ] && [ -d "$2" ]; then
if [ "$1" = "sim" ]; then
make clean
CFLAGS='-arch i386' LDFLAGS='-arch i386' CC=/Developer/Platforms/iPhoneSimulator.platform/Developer/usr/bin/gcc-4.2 ./configure --prefix=$2/misc/allegro-iphone/install/sim/ && make && make install
elif [ "$1" = "phone" ]; then
make clean
CFLAGS='-arch armv6 -isysroot /Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS4.3.sdk -DDARWIN_NO_CARBON' LDFLAGS='-arch armv6 -isysroot /Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS4.3.sdk' CC=/Developer/Platforms/iPhoneOS.platform/Developer/usr/bin/gcc-4.2 ./configure --host=arm --build=i386 --prefix=$2/misc/allegro-iphone/install/phone/ --with-sysroot=/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS4.3.sdk && make && make install
fi
else
use
fi
|
scristopher/paintown
|
misc/allegro-iphone/build-freetype.sh
|
Shell
|
bsd-3-clause
| 999 |
#!/bin/sh
# Configure script for Hugs (using Microsoft Visual C++)
# Before we can run the configure script, we have to patch some
# incompatabilities between Unix and Windows:
#
# o Visual C++ can't handle the file descriptor that bash (from
# cygwin beta release 16) passes to it when stdout is redirected
# to /dev/null.
#
# o Visual C++ writes input filenames to stderr as it processes them.
sed ../unix/configure >./config.fix \
-e "s#/dev/null#conf_devnull#" \
-e "s/-v '\^ \*+'/-i \"error\\\|warning\"/g"
# Now we override the default values of some environment variables.
set -a # All modified env vars are to be exported!
CC=${CC="cl /nologo"}
DEBUGFLAGS=${DEBUGFLAGS="-Zi"}
LDDEBUGFLAGS=${LDDEBUGFLAGS="-Zi"}
OPTFLAGS=${OPTFLAGS="-O2"}
CFLAGS=${CFLAGS="-ML"}
LDFLAGS=$LD
DLL_FLAGS="/LD"
CPP=${CPP="cl /nologo /E"}
LIBS=${LIBS="kernel32.lib advapi32.lib"}
GUILIBS="kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib comctl32.lib winmm.lib advapi32.lib"
# Run the script
./config.fix --target=windows $*
# Store the generated files for the benefit of those who can't
# run configure directly.
echo "Copying ../Makefile, ../config.h and ../options.h to ."
cp ../Makefile ../config.h ../options.h .
# End
|
OS2World/DEV-UTIL-HUGS
|
src/msc/config.sh
|
Shell
|
bsd-3-clause
| 1,282 |
#!/bin/bash
config=$(curl -s ${CONFIG_URL})
MEMCACHED_SERVERS=$(echo "$config" | jq -r '.MEMCACHE_HOST')
ACTIVATION_TOKEN=$(echo "$config" | jq -r '.RAILGUN_ACTIVATION_TOKEN')
ACTIVATION_RAILGUN_HOST=$(echo "$config" | jq -r '.RAILGUN_ACTIVATION_HOST')
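# Rewrite the matching keys in railgun.conf in place with the values fetched
# above, point the error log at stdout, and raise the log level.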
sed -i "s/^memcached\.servers.*/memcached\.servers = ${MEMCACHED_SERVERS}/g" /etc/railgun/railgun.conf
sed -i "s/^activation\.token.*/activation\.token = ${ACTIVATION_TOKEN}/g" /etc/railgun/railgun.conf
sed -i "s/^activation\.railgun_host.*/activation\.railgun_host = ${ACTIVATION_RAILGUN_HOST}/g" /etc/railgun/railgun.conf
sed -i "s@^stderr\.file.*@stderr.file = /dev/stdout@g" /etc/railgun/railgun.conf
sed -i "s/^log\.level.*/log.level = 5/g" /etc/railgun/railgun.conf
exec /usr/bin/rg-listener -config=/etc/railgun/railgun.conf
|
ArabellaTech/aa-docker-tools
|
testing/nginx/railgun/railgun.sh
|
Shell
|
mit
| 789 |
#!/usr/bin/env bash
# Ensure this file is executable via `chmod a+x lein`, then place it
# somewhere on your $PATH, like ~/bin. The rest of Leiningen will be
# installed upon first run into the ~/.lein/self-installs directory.
export LEIN_VERSION="2.5.0"
case $LEIN_VERSION in
*SNAPSHOT) SNAPSHOT="YES" ;;
*) SNAPSHOT="NO" ;;
esac
if [[ "$OSTYPE" == "cygwin" ]] || [[ "$OSTYPE" == "msys" ]]; then
delimiter=";"
else
delimiter=":"
fi
if [[ "$OSTYPE" == "cygwin" ]]; then
cygwin=true
else
cygwin=false
fi
function make_native_path {
# ensure we have native paths
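# (e.g. under Cygwin, cygpath -wp turns /cygdrive/c/foo into C:\foo)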
if $cygwin && [[ "$1" == /* ]]; then
echo -n "$(cygpath -wp "$1")"
elif [[ "$OSTYPE" == "msys" && "$1" == /?/* ]]; then
echo -n "$(sh -c "(cd $1 2</dev/null && pwd -W) || echo $1 | sed 's/^\\/\([a-z]\)/\\1:/g'")"
else
echo -n "$1"
fi
}
# usage : add_path PATH_VAR [PATH]...
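# e.g. add_path CLASSPATH "/a" "/b" appends both entries to $CLASSPATH,
# joined with ";" on Windows shells and ":" elsewhere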
function add_path {
local path_var="$1"
shift
while [ -n "$1" ];do
# http://bashify.com/?Useful_Techniques:Indirect_Variables:Indirect_Assignment
if [[ -z ${!path_var} ]]; then
export ${path_var}="$(make_native_path "$1")"
else
export ${path_var}="${!path_var}${delimiter}$(make_native_path "$1")"
fi
shift
done
}
function download_failed_message {
echo "Failed to download $1 (exit code $2)"
echo "It's possible your HTTP client's certificate store does not have the"
echo "correct certificate authority needed. This is often caused by an"
echo "out-of-date version of libssl. It's also possible that you're behind a"
echo "firewall and haven't set HTTP_PROXY and HTTPS_PROXY."
}
function self_install {
if [ -r "$LEIN_JAR" ]; then
echo "The self-install jar already exists at $LEIN_JAR."
echo "If you wish to re-download, delete it and rerun \"$0 self-install\"."
exit 1
fi
echo "Downloading Leiningen to $LEIN_JAR now..."
mkdir -p "$(dirname "$LEIN_JAR")"
LEIN_URL="https://github.com/technomancy/leiningen/releases/download/$LEIN_VERSION/leiningen-$LEIN_VERSION-standalone.jar"
$HTTP_CLIENT "$LEIN_JAR.pending" "$LEIN_URL"
local exit_code=$?
if [ $exit_code == 0 ]; then
# TODO: checksum
mv -f "$LEIN_JAR.pending" "$LEIN_JAR"
else
rm "$LEIN_JAR.pending" 2> /dev/null
download_failed_message "$LEIN_URL" "$exit_code"
exit 1
fi
}
if [ `id -u` -eq 0 ] && [ "$LEIN_ROOT" = "" ]; then
echo "WARNING: You're currently running as root; probably by accident."
echo "Press control-C to abort or Enter to continue as root."
echo "Set LEIN_ROOT to disable this warning."
read _
fi
NOT_FOUND=1
ORIGINAL_PWD="$PWD"
while [ ! -r "$PWD/project.clj" ] && [ "$PWD" != "/" ] && [ $NOT_FOUND -ne 0 ]
do
cd ..
if [ "$(dirname "$PWD")" = "/" ]; then
NOT_FOUND=0
cd "$ORIGINAL_PWD"
fi
done
export LEIN_HOME="${LEIN_HOME:-"$HOME/.lein"}"
for f in "$LEIN_HOME/leinrc" ".leinrc"; do
if [ -e "$f" ]; then
source "$f"
fi
done
if $cygwin; then
export LEIN_HOME=`cygpath -w "$LEIN_HOME"`
fi
LEIN_JAR="$LEIN_HOME/self-installs/leiningen-$LEIN_VERSION-standalone.jar"
# normalize $0 on certain BSDs
if [ "$(dirname "$0")" = "." ]; then
SCRIPT="$(which $(basename "$0"))"
else
SCRIPT="$0"
fi
# resolve symlinks to the script itself portably
while [ -h "$SCRIPT" ] ; do
ls=`ls -ld "$SCRIPT"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
SCRIPT="$link"
else
SCRIPT="$(dirname "$SCRIPT"$)/$link"
fi
done
BIN_DIR="$(dirname "$SCRIPT")"
export LEIN_JVM_OPTS="${LEIN_JVM_OPTS-"-XX:+TieredCompilation -XX:TieredStopAtLevel=1"}"
# This needs to be defined before we call HTTP_CLIENT below
if [ "$HTTP_CLIENT" = "" ]; then
if type -p curl >/dev/null 2>&1; then
if [ "$https_proxy" != "" ]; then
CURL_PROXY="-x $https_proxy"
fi
HTTP_CLIENT="curl $CURL_PROXY -f -L -o"
else
HTTP_CLIENT="wget -O"
fi
fi
# When :eval-in :classloader we need more memory
grep -E -q '^\s*:eval-in\s+:classloader\s*$' project.clj 2> /dev/null && \
export LEIN_JVM_OPTS="$LEIN_JVM_OPTS -Xms64m -Xmx512m"
if [ -r "$BIN_DIR/../src/leiningen/version.clj" ]; then
# Running from source checkout
LEIN_DIR="$(dirname "$BIN_DIR")"
# Need to use lein release to bootstrap the leiningen-core library (for aether)
if [ ! -r "$LEIN_DIR/leiningen-core/.lein-bootstrap" ]; then
echo "Leiningen is missing its dependencies."
echo "Please run \"lein bootstrap\" in the leiningen-core/ directory"
echo "with a stable release of Leiningen. See CONTRIBUTING.md for details."
exit 1
fi
# If project.clj for lein or leiningen-core changes, we must recalculate
LAST_PROJECT_CHECKSUM=$(cat "$LEIN_DIR/.lein-project-checksum" 2> /dev/null)
PROJECT_CHECKSUM=$(sum "$LEIN_DIR/project.clj" "$LEIN_DIR/leiningen-core/project.clj")
if [ "$PROJECT_CHECKSUM" != "$LAST_PROJECT_CHECKSUM" ]; then
if [ -r "$LEIN_DIR/.lein-classpath" ]; then
rm "$LEIN_DIR/.lein-classpath"
fi
fi
# Use bin/lein to calculate its own classpath.
if [ ! -r "$LEIN_DIR/.lein-classpath" ] && [ "$1" != "classpath" ]; then
echo "Recalculating Leiningen's classpath."
ORIG_PWD="$PWD"
cd "$LEIN_DIR"
LEIN_NO_USER_PROFILES=1 $0 classpath .lein-classpath
sum "$LEIN_DIR/project.clj" "$LEIN_DIR/leiningen-core/project.clj" > \
.lein-project-checksum
cd "$ORIG_PWD"
fi
mkdir -p "$LEIN_DIR/target/classes"
export LEIN_JVM_OPTS="$LEIN_JVM_OPTS -Dclojure.compile.path=$LEIN_DIR/target/classes"
add_path CLASSPATH "$LEIN_DIR/leiningen-core/src/" "$LEIN_DIR/leiningen-core/resources/" \
"$LEIN_DIR/test:$LEIN_DIR/target/classes" "$LEIN_DIR/src" ":$LEIN_DIR/resources"
if [ -r "$LEIN_DIR/.lein-classpath" ]; then
add_path CLASSPATH "$(cat "$LEIN_DIR/.lein-classpath" 2> /dev/null)"
else
add_path CLASSPATH "$(cat "$LEIN_DIR/leiningen-core/.lein-bootstrap" 2> /dev/null)"
fi
else # Not running from a checkout
add_path CLASSPATH "$LEIN_JAR"
BOOTCLASSPATH="-Xbootclasspath/a:$LEIN_JAR"
if [ ! -r "$LEIN_JAR" -a "$1" != "self-install" ]; then
self_install
fi
fi
# TODO: explain what to do when Java is missing
export JAVA_CMD="${JAVA_CMD:-"java"}"
export LEIN_JAVA_CMD="${LEIN_JAVA_CMD:-$JAVA_CMD}"
if [[ -z "${DRIP_INIT+x}" && "$(basename "$LEIN_JAVA_CMD")" == *drip* ]]; then
export DRIP_INIT="$(printf -- '-e\n(require (quote leiningen.repl))')"
export DRIP_INIT_CLASS="clojure.main"
fi
# Support $JAVA_OPTS for backwards-compatibility.
export JVM_OPTS="${JVM_OPTS:-"$JAVA_OPTS"}"
# Handle jline issue with cygwin not propagating OSTYPE through java subprocesses: https://github.com/jline/jline2/issues/62
cygterm=false
if $cygwin; then
case "$TERM" in
rxvt* | xterm* | vt*) cygterm=true ;;
esac
fi
if $cygterm; then
LEIN_JVM_OPTS="$LEIN_JVM_OPTS -Djline.terminal=jline.UnixTerminal"
stty -icanon min 1 -echo > /dev/null 2>&1
fi
# TODO: investigate http://skife.org/java/unix/2011/06/20/really_executable_jars.html
# If you're packaging this for a package manager (.deb, homebrew, etc)
# you need to remove the self-install and upgrade functionality or see lein-pkg.
if [ "$1" = "self-install" ]; then
if [ -r "$BIN_DIR/../src/leiningen/version.clj" ]; then
echo "Running self-install from a checkout is not supported."
echo "See CONTRIBUTING.md for SNAPSHOT-specific build instructions."
exit 1
fi
echo "Manual self-install is deprecated; it will run automatically when necessary."
self_install
elif [ "$1" = "upgrade" ] || [ "$1" = "downgrade" ]; then
if [ "$LEIN_DIR" != "" ]; then
echo "The upgrade task is not meant to be run from a checkout."
exit 1
fi
if [ $SNAPSHOT = "YES" ]; then
echo "The upgrade task is only meant for stable releases."
echo "See the \"Hacking\" section of the README."
exit 1
fi
if [ ! -w "$SCRIPT" ]; then
echo "You do not have permission to upgrade the installation in $SCRIPT"
exit 1
else
TARGET_VERSION="${2:-stable}"
echo "The script at $SCRIPT will be upgraded to the latest $TARGET_VERSION version."
echo -n "Do you want to continue [Y/n]? "
read RESP
case "$RESP" in
y|Y|"")
echo
echo "Upgrading..."
TARGET="/tmp/lein-$$-upgrade"
if $cygwin; then
TARGET=`cygpath -w $TARGET`
fi
LEIN_SCRIPT_URL="https://github.com/technomancy/leiningen/raw/$TARGET_VERSION/bin/lein"
$HTTP_CLIENT "$TARGET" "$LEIN_SCRIPT_URL"
if [ $? == 0 ]; then
cmp -s "$TARGET" "$SCRIPT"
if [ $? == 0 ]; then
echo "Leiningen is already up-to-date."
fi
mv "$TARGET" "$SCRIPT" && chmod +x "$SCRIPT"
exec "$SCRIPT" version
else
download_failed_message "$LEIN_SCRIPT_URL"
fi;;
*)
echo "Aborted."
exit 1;;
esac
fi
else
if $cygwin; then
# When running on Cygwin, use Windows-style paths for java
ORIGINAL_PWD=`cygpath -w "$ORIGINAL_PWD"`
fi
# apply context specific CLASSPATH entries
if [ -f .lein-classpath ]; then
add_path CLASSPATH "$(cat .lein-classpath)"
fi
if [ $DEBUG ]; then
echo "Leiningen's classpath: $CLASSPATH"
fi
if [ -r .lein-fast-trampoline ]; then
export LEIN_FAST_TRAMPOLINE='y'
fi
if [ "$LEIN_FAST_TRAMPOLINE" != "" ] && [ -r project.clj ]; then
INPUTS="$@ $(cat project.clj) $LEIN_VERSION $(test -f "$LEIN_HOME/profiles.clj" && cat "$LEIN_HOME/profiles.clj")"
export INPUT_CHECKSUM=$(echo $INPUTS | shasum - | cut -f 1 -d " ")
# Just don't change :target-path in project.clj, mkay?
TRAMPOLINE_FILE="target/trampolines/$INPUT_CHECKSUM"
else
if hash mktemp 2>/dev/null; then
# Check if mktemp is available before using it
TRAMPOLINE_FILE="$(mktemp /tmp/lein-trampoline-XXXXXXXXXXXXX)"
else
TRAMPOLINE_FILE="/tmp/lein-trampoline-$$"
fi
trap "rm -f $TRAMPOLINE_FILE" EXIT
fi
if $cygwin; then
TRAMPOLINE_FILE=`cygpath -w $TRAMPOLINE_FILE`
fi
if [ "$INPUT_CHECKSUM" != "" ] && [ -r "$TRAMPOLINE_FILE" ]; then
if [ $DEBUG ]; then
echo "Fast trampoline with $TRAMPOLINE_FILE."
fi
exec sh -c "exec $(cat $TRAMPOLINE_FILE)"
else
export TRAMPOLINE_FILE
"$LEIN_JAVA_CMD" \
"${BOOTCLASSPATH[@]}" \
-Dfile.encoding=UTF-8 \
-Dmaven.wagon.http.ssl.easy=false \
-Dmaven.wagon.rto=10000 \
$LEIN_JVM_OPTS \
-Dleiningen.original.pwd="$ORIGINAL_PWD" \
-Dleiningen.script="$SCRIPT" \
-classpath "$CLASSPATH" \
clojure.main -m leiningen.core.main "$@"
EXIT_CODE=$?
if $cygterm ; then
stty icanon echo > /dev/null 2>&1
fi
## TODO: [ -r "$TRAMPOLINE_FILE" ] may be redundant? A trampoline file
## is always generated these days.
if [ -r "$TRAMPOLINE_FILE" ] && [ "$LEIN_TRAMPOLINE_WARMUP" = "" ]; then
TRAMPOLINE="$(cat $TRAMPOLINE_FILE)"
if [ "$INPUT_CHECKSUM" = "" ]; then
rm $TRAMPOLINE_FILE
fi
if [ "$TRAMPOLINE" = "" ]; then
exit $EXIT_CODE
else
exec sh -c "exec $TRAMPOLINE"
fi
else
exit $EXIT_CODE
fi
fi
fi
|
AeroNotix/brim-template
|
resources/leiningen/new/brim/lein.sh
|
Shell
|
epl-1.0
| 11,958 |
cd $(dirname "$0")
$NDK/ndk-build -B "$@"
# Find instruction in file
# $1: instruction
# $2: file
find_instruction ()
{
local INST=$1
local FILE=$2
grep -q -w -F -e $INST $FILE
if [ $? != 0 ]; then
echo "$INST expected in file $FILE"
exit 1
fi
}
find_instruction "madd.d" mips-fp4-test1-2.s
find_instruction "msub.d" mips-fp4-test1-2.s
find_instruction "nmadd.d" mips-fp4-test3-6.s
find_instruction "nmsub.d" mips-fp4-test3-6.s
find_instruction "recip.d" mips-fp4-test3-6.s
find_instruction "rsqrt.d" mips-fp4-test3-6.s
rm -f *.s *.i
|
efortuna/AndroidSDKClone
|
ndk_experimental/tests/build/mips-fp4/build.sh
|
Shell
|
apache-2.0
| 575 |
#!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A configuration for Kubemark cluster. It doesn't need to be kept in
# sync with gce/config-default.sh (except for the filename, because I'm reusing
# the gce/util.sh script, which assumes that config filename), but if some things
# that are enabled by default should not run in hollow clusters, they should be disabled here.
source "${KUBE_ROOT}/cluster/gce/config-common.sh"
GCLOUD=gcloud
ZONE=${KUBE_GCE_ZONE:-us-central1-b}
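# Derive the region by stripping the zone's trailing "-<letter>" suffix
# (e.g. us-central1-b -> us-central1).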
REGION=${ZONE%-*}
NUM_NODES=${KUBEMARK_NUM_NODES:-10}
MASTER_SIZE=${KUBEMARK_MASTER_SIZE:-n1-standard-$(get-master-size)}
MASTER_DISK_TYPE=pd-ssd
MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)}
MASTER_ROOT_DISK_SIZE=${KUBEMARK_MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false}
PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
NODE_ACCELERATORS=${NODE_ACCELERATORS:-""}
CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
EVENT_PD=${EVENT_PD:-false}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-gci}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-gci}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-cos-stable-63-10032-71-0}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
# GPUs supported in GCE do not have compatible drivers in Debian 7.
if [[ "${NODE_OS_DISTRIBUTION}" == "debian" ]]; then
NODE_ACCELERATORS=""
fi
NETWORK=${KUBE_GCE_NETWORK:-e2e}
if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then
SUBNETWORK="${SUBNETWORK:-${NETWORK}-custom-subnet}"
fi
INSTANCE_PREFIX="${INSTANCE_PREFIX:-"default"}"
MASTER_NAME="${INSTANCE_PREFIX}-kubemark-master"
AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-kubemark-aggregator"
MASTER_TAG="kubemark-master"
ETCD_QUORUM_READ="${ENABLE_ETCD_QUORUM_READ:-false}"
EVENT_STORE_NAME="${INSTANCE_PREFIX}-event-store"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-$(get-cluster-ip-range)}"
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
TERMINATED_POD_GC_THRESHOLD=${TERMINATED_POD_GC_THRESHOLD:-100}
KUBE_APISERVER_REQUEST_TIMEOUT=300
ETCD_COMPACTION_INTERVAL_SEC="${KUBEMARK_ETCD_COMPACTION_INTERVAL_SEC:-}"
# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.1.10) if you need
# non-default version.
ETCD_IMAGE="${TEST_ETCD_IMAGE:-}"
ETCD_VERSION="${TEST_ETCD_VERSION:-}"
# Storage backend. 'etcd2' and 'etcd3' are supported.
STORAGE_BACKEND=${STORAGE_BACKEND:-}
# Storage media type: application/json and application/vnd.kubernetes.protobuf are supported.
STORAGE_MEDIA_TYPE=${STORAGE_MEDIA_TYPE:-}
# Default Log level for all components in test clusters and variables to override it in specific components.
TEST_CLUSTER_LOG_LEVEL="${TEST_CLUSTER_LOG_LEVEL:---v=2}"
KUBELET_TEST_LOG_LEVEL="${KUBELET_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
API_SERVER_TEST_LOG_LEVEL="${API_SERVER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
CONTROLLER_MANAGER_TEST_LOG_LEVEL="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
SCHEDULER_TEST_LOG_LEVEL="${SCHEDULER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
KUBEPROXY_TEST_LOG_LEVEL="${KUBEPROXY_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
TEST_CLUSTER_DELETE_COLLECTION_WORKERS="${TEST_CLUSTER_DELETE_COLLECTION_WORKERS:---delete-collection-workers=16}"
TEST_CLUSTER_MAX_REQUESTS_INFLIGHT="${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT:-}"
TEST_CLUSTER_RESYNC_PERIOD="${TEST_CLUSTER_RESYNC_PERIOD:-}"
# ContentType used by all components to communicate with apiserver.
TEST_CLUSTER_API_CONTENT_TYPE="${TEST_CLUSTER_API_CONTENT_TYPE:-}"
KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS="${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS:-}"
CUSTOM_ADMISSION_PLUGINS="${CUSTOM_ADMISSION_PLUGINS:-Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota}"
# Master components' test arguments.
APISERVER_TEST_ARGS="${KUBEMARK_APISERVER_TEST_ARGS:-} --runtime-config=extensions/v1beta1 ${API_SERVER_TEST_LOG_LEVEL} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT} ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS}"
CONTROLLER_MANAGER_TEST_ARGS="${KUBEMARK_CONTROLLER_MANAGER_TEST_ARGS:-} ${CONTROLLER_MANAGER_TEST_LOG_LEVEL} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE} ${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS}"
SCHEDULER_TEST_ARGS="${KUBEMARK_SCHEDULER_TEST_ARGS:-} ${SCHEDULER_TEST_LOG_LEVEL} ${TEST_CLUSTER_API_CONTENT_TYPE} ${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS}"
# Hollow-node components' test arguments.
KUBELET_TEST_ARGS="--max-pods=100 $TEST_CLUSTER_LOG_LEVEL ${TEST_CLUSTER_API_CONTENT_TYPE}"
KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_LOG_LEVEL} ${TEST_CLUSTER_API_CONTENT_TYPE}"
USE_REAL_PROXIER=${USE_REAL_PROXIER:-true} # for hollow-proxy
SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
ALLOCATE_NODE_CIDRS=true
# Optional: Enable cluster autoscaler.
ENABLE_KUBEMARK_CLUSTER_AUTOSCALER="${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER:-false}"
# When using Cluster Autoscaler, always start with one hollow-node replica.
# NUM_NODES should not be specified by the user. Instead we use
# NUM_NODES=KUBEMARK_AUTOSCALER_MAX_NODES. This gives other cluster components
# (e.g. kubemark master, Heapster) enough resources to handle maximum cluster size.
if [[ "${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER}" == "true" ]]; then
NUM_REPLICAS=1
if [[ ! -z "$NUM_NODES" ]]; then
echo "WARNING: Using Cluster Autoscaler, ignoring NUM_NODES parameter. Set KUBEMARK_AUTOSCALER_MAX_NODES to specify maximum size of the cluster."
fi
fi
# Optional: set feature gates
FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}"
if [[ ! -z "${NODE_ACCELERATORS}" ]]; then
FEATURE_GATES="${FEATURE_GATES},Accelerators=true"
fi
# Enable a simple "AdvancedAuditing" setup for testing.
ENABLE_APISERVER_ADVANCED_AUDIT="${ENABLE_APISERVER_ADVANCED_AUDIT:-false}"
# Optional: enable pod priority
ENABLE_POD_PRIORITY="${ENABLE_POD_PRIORITY:-}"
if [[ "${ENABLE_POD_PRIORITY}" == "true" ]]; then
FEATURE_GATES="${FEATURE_GATES},PodPriority=true"
fi
# The number of services that are allowed to sync concurrently. Will be passed
# into kube-controller-manager via `--concurrent-service-syncs`
CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}"
|
humblec/external-storage
|
vendor/k8s.io/kubernetes/cluster/kubemark/gce/config-default.sh
|
Shell
|
apache-2.0
| 6,843 |
#!/usr/bin/env bash
benchmarks=$(pwd)/$( dirname "${BASH_SOURCE[0]}" )/*
for benchmark in $benchmarks
do
if ! [[ "${benchmark#*.}" =~ (rb|sh)$ ]]; then
$benchmark
echo "================================================================================================"
echo ""
fi
done
|
Erol/hanami-router
|
benchmarks/run.sh
|
Shell
|
mit
| 300 |
#!/usr/bin/env bash
set -e
###############################################################################
#
# all-tests.sh
#
# Execute tests for edx-platform. This script is designed to be the
# entry point for various CI systems.
#
###############################################################################
# Violations thresholds for failing the build
export PYLINT_THRESHOLD=3750
export ESLINT_THRESHOLD=10162
SAFELINT_THRESHOLDS=`cat scripts/safelint_thresholds.json`
export SAFELINT_THRESHOLDS=${SAFELINT_THRESHOLDS//[[:space:]]/}
doCheckVars() {
if [ -n "$CIRCLECI" ] ; then
SCRIPT_TO_RUN=scripts/circle-ci-tests.sh
elif [ -n "$JENKINS_HOME" ] ; then
source scripts/jenkins-common.sh
SCRIPT_TO_RUN=scripts/generic-ci-tests.sh
fi
}
# Determine the CI system for the environment
doCheckVars
# Run appropriate CI system script
if [ -n "$SCRIPT_TO_RUN" ] ; then
$SCRIPT_TO_RUN
# Exit with the exit code of the called script
exit $?
else
echo "ERROR. Could not detect continuous integration system."
exit 1
fi
|
chrisndodge/edx-platform
|
scripts/all-tests.sh
|
Shell
|
agpl-3.0
| 1,087 |
add new file
|
rizkaz/TestRebase
|
test.sh
|
Shell
|
mit
| 13 |
#!/bin/bash
mkdir -p ${PREFIX}/bin/
mkdir -p ${PREFIX}/share/amplify/
mkdir -p ${PREFIX}/share/amplify/src/
mkdir -p ${PREFIX}/share/amplify/models/
cp -r src/. ${PREFIX}/share/amplify/src
cp -r models/. ${PREFIX}/share/amplify/models
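# Generate thin wrapper scripts on PATH that dispatch to the copies installed
# under share/amplify/src, forwarding all command-line arguments.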
echo "#!/bin/bash" > ${PREFIX}/bin/AMPlify
echo "${PREFIX}/share/amplify/src/AMPlify.py \$@" >> ${PREFIX}/bin/AMPlify
echo "#!/bin/bash" > ${PREFIX}/bin/train_amplify
echo "${PREFIX}/share/amplify/src/train_amplify.py \$@" >> ${PREFIX}/bin/train_amplify
|
cokelaer/bioconda-recipes
|
recipes/amplify/build.sh
|
Shell
|
mit
| 492 |
#!/bin/bash
FN="ALLMLL_1.34.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.14/data/experiment/src/contrib/ALLMLL_1.34.0.tar.gz"
"https://bioarchive.galaxyproject.org/ALLMLL_1.34.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-allmll/bioconductor-allmll_1.34.0_src_all.tar.gz"
)
MD5="687fe88d86d18596cf736fc9d3076cd7"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
cokelaer/bioconda-recipes
|
recipes/bioconductor-allmll/post-link.sh
|
Shell
|
mit
| 1,282 |
#!/bin/bash
bench="bin/clang-linux-3.2/release/threading-multi/osc_chain_1d"
repeat=5
maxnodes=16
function run {
n=$1
steps=$2
for ((nodes=1 ; nodes < $maxnodes ; nodes++)) ; do
# swap stderr & stdout
mpirun -np $nodes $bench $n $steps $repeat 3>&1 1>&2 2>&3
done
}
function run_all {
printf "n\tsteps\tnodes\ttime\n"
run 256 1024
run 4096 1024
run 4194304 1
}
run_all | tee osc_chain_speedup.dat
|
rkq/cxxexp
|
third-party/src/boost_1_56_0/libs/numeric/odeint/performance/mpi/osc_chain_speedup.sh
|
Shell
|
mit
| 450 |
#!/bin/bash
#
# XReflector start script for GUI
# Hans-J. Barthen (DL5DI) - [email protected]
#
# description: Starts the G4KLX XReflector in GUI mode and sets the LC_NUMERIC locale
# to en_US.UTF-8 to keep the config file compatible between GUI and daemon mode.
# config: /etc/sysconfig/ircddbgateway
#
if [ $UID -ne 0 ]; then
echo
echo "ERROR: This script must be run as the root user!"
echo " Please use 'su' or log in as root and try again."
echo
exit 1
fi
# start main task with correct LOCALE settings
LC_NUMERIC="en_US.UTF-8" /usr/local/bin/xreflector -gui -logdir=/var/log/dstar
|
hgraves/My_DVMega
|
LinuxExtras/CentOS/XReflector/usr/local/bin/xreflector.sh
|
Shell
|
gpl-2.0
| 598 |
ARCH=avr:3
MACHINE=
SCRIPT_NAME=elf32avr
OUTPUT_FORMAT="elf32-avr"
MAXPAGESIZE=1
EMBEDDED=yes
TEMPLATE_NAME=generic
TEXT_LENGTH=64K
DATA_LENGTH=4000
EEPROM_LENGTH=2K
DATA_START=0x60
STACK=0x0FFF
|
pumpkin83/OpenUH-OpenACC
|
osprey/cygnus/ld/emulparams/avrmega603.sh
|
Shell
|
gpl-3.0
| 196 |