| code (stringlengths, 2–1.05M) | repo_name (stringlengths, 5–110) | path (stringlengths, 3–922) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 2–1.05M) |
|---|---|---|---|---|---|
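# make_version.sh: copy the Clustering sources into the given version directory and pack it into a gzipped tarball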
v_dir=$1
if [ ! -d "$v_dir" ]; then
echo "$v_dir does not exist, will create it."
mkdir "$v_dir"
fi
cp -r clustering.cpp\
CMakeLists.txt\
config.hpp.cmake.in\
density_clustering.cpp\
density_clustering.hpp\
density_clustering_mpi.cpp\
density_clustering_mpi.hpp\
density_clustering_common.hpp\
density_clustering_common.cpp\
FindOpenCL.cmake\
cl.hpp\
density_clustering_opencl.hpp\
density_clustering_opencl.cpp\
pops.cl\
generate_header.py\
logger.cpp\
logger.hpp\
mpp.cpp\
mpp.hpp\
network_builder.cpp\
network_builder.hpp\
README.md\
state_filter.cpp\
state_filter.hpp\
tools.cpp\
tools.hpp\
tools.hxx\
coring.cpp\
coring.hpp\
embedded_cytoscape.hpp\
doc\
doxygen.config\
coords_file\
$v_dir/
tar cf $v_dir.tar $v_dir/
gzip $v_dir.tar
| lettis/Clustering | make_version.sh | Shell | bsd-2-clause | 941 |
#!/bin/bash
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# We're in the tools directory.
ROOT_DIR="$(dirname "$(readlink -f "$0")")/.."
source "${ROOT_DIR}/tools/common.sh"
cd "${ROOT_DIR}"
DARTFMT="${DART_SDK_DIR}/bin/dartfmt"
find . -name '*.dart' -print0 | xargs -0 "$DARTFMT" -w
| viettrungluu-cr/vterm | tools/dartfmt_all.sh | Shell | bsd-3-clause | 402 |
#!/bin/bash
# create animal thing class
curl localhost:8080/v1/schema/things -H 'Content-Type: application/json' -d @./tools/dev/example-payloads/animal-class.json
# create zoo thing class
curl localhost:8080/v1/schema/things -H 'Content-Type: application/json' -d @./tools/dev/example-payloads/zoo-class.json
# create an elephant
curl localhost:8080/v1/things -H 'Content-Type: application/json' -d @./tools/dev/example-payloads/animal-instance-elephant.json
# # create a zoo
# curl localhost:8080/v1/things -H 'Content-Type: application/json' -d @./tools/dev/example-payloads/zoo-instance-nowhere.json
# create an incorrect class
# curl localhost:8080/v1/schema/things -H 'Content-Type: application/json' -d @./tools/dev/example-payloads/incorrect-class.json
# create an incorrect property on the incorrect class
curl localhost:8080/v1/schema/things/IncorrectAnimal/properties -H 'Content-Type: application/json' -d @./tools/dev/example-payloads/incorrect-class-incorrect-property.json
| weaviate/weaviate | tools/dev/example-payloads/examples.sh | Shell | bsd-3-clause | 994 |
#!/bin/bash
# Switch to the directory with the project
cd /home/pi/closedloopcoast-connector/
# Update nodejs
sudo apt install nodejs
# Get the latest code from github
git pull origin master
# Fetch dependencies if there are updates and install them
npm install
# Start node app as a user pi
su pi -c 'screen -dm -S closedloopcoast node /home/pi/closedloopcoast-connector/index.js < /dev/null &'
| Relearn/closedloopcoast-connector | onboot.sh | Shell | bsd-3-clause | 395 |
#!/bin/bash
# author: Liang Gong
if [ "$(uname)" == "Darwin" ]; then
# under Mac OS X platform
NODE='node'
elif [ "$(expr substr $(uname -s) 1 5)" == "Linux" ]; then
# under GNU/Linux platform
NODE='nodejs'
fi
cd directory-traversal/serverhuwenhui
RED='\033[0;31m'
BLUE='\033[0;34m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color
# start the server
echo -e "\t[${GREEN}start vulnerable server${NC}]: ${BLUE}serverhuwenhui${NC}"
$NODE test.js >/dev/null 2>&1 &
vulnpid=$!
# wait for the server to get started
sleep 1.5s
echo -e "\t[${GREEN}server root directory${NC}]: `pwd`"
# utilize directory traversal to get files outside the working directory
# trigger directory traversal issues: send a request to retrieve the confidential file outside the working directory
$NODE attack.js
# kill the vulnerable npm package's process
kill -9 $vulnpid
| JacksonGL/NPM-Vuln-PoC | directory-traversal/serverhuwenhui/PoC.sh | Shell | bsd-3-clause | 870 |
#!/bin/bash
# key_convert.sh Convert private keys into PEM format for AMD usage
# Chris Vidler - Dynatrace DCRUM SME 2016
#
# config
DEBUG=0
# script below do not edit
OPTS=0
while getopts ":k:hds" OPT; do
case $OPT in
k)
KEYFILE=$OPTARG
OPTS=1
;;
h)
;;
d)
DEBUG=$((DEBUG + 1))
;;
s)
STDIN=" -password stdin"
;;
\?)
echo "*** FATAL: Invalid argument -$OPTARG."
exit 1
;;
:)
echo "*** FATAL: argument -$OPTARG requires parameter."
exit 1
;;
esac
done
if [ $OPTS -eq 0 ]; then
echo "*** INFO: Usage: $0 [-h] [-s] -k keyfile"
echo "-h help"
echo "-s Accept openssl passwords from stdin (for scripted execution)"
exit 0
fi
function debugecho {
dbglevel=${2:-1}
if [ $DEBUG -ge $dbglevel ]; then techo "*** DEBUG[$dbglevel]: $1"; fi
}
function techo {
echo -e "[`date -u`]: $1"
}
# do some rudimentary checks that the file exists.
if [ -z "$KEYFILE" ]; then
techo "***FATAL: Required filename parameter missing."
exit 1
fi
if [ ! -r $KEYFILE ]; then
techo "*** FATAL: Key file [$KEYFILE] not readable."
exit 1
fi
techo "Reading key: $KEYFILE"
# get extension
KEYEXT=`basename $KEYFILE`
KEYEXT=${KEYEXT##*.}
debugecho "KEYEXT: [$KEYEXT]"
case "$KEYEXT" in
key)
TYPE=pem
;;
pem)
TYPE=pem
;;
der)
TYPE=der
;;
pfx)
TYPE=p12
;;
p12)
TYPE=p12
;;
jks)
TYPE=jks
;;
*)
echo -e "*** FATAL: Unable to determine key format of $KEYFILE. Script supports only PEM, DER, JKS and P12/PFX formats"
exit 1
;;
esac
if [ $TYPE == jks ]; then
# open JKS, allow user to pick a private key (multiple are possible), convert that key to a PKCS12 file, and then use the PKCS12 methods below.
techo "Extracting key from Java Key Store format file"
techo "***WARNING: experimental support for JKS"
if [ ! -x "/usr/lib/jvm/jre-openjdk/bin/keytool" ]; then
KEYTOOL=`which keytool`
if [ $? -ne 0 ]; then techo "***FATAL Java keytool utility required for JKS extraction, not found. Aborting."; exit 1; fi
else
KEYTOOL="/usr/lib/jvm/jre-openjdk/bin/keytool"
fi
# get a list of private keys by alias, with blank password (no authenticity check, but user doesn't get prompted for anything)
RESULTS=$(echo -e '\n' | $KEYTOOL -list -storetype jks -keystore $KEYFILE 2> /dev/null | grep -A 1 "PrivateKeyEntry" )
NUMKEYS=0
NUMKEYS=$(echo "$RESULTS" | grep -c "PrivateKeyEntry")
if [ $NUMKEYS == 0 ]; then techo "No private keys found in JKS file [$KEYFILE], aborting."; exit 1; fi
#extract alias names
echo "Choose key to extract:"
echo "#, alias, creation date, certificate fingerprint"
IFS=','
KEYNUM=0
declare -a ALIASES
while read -r a c k; do
if [[ $a == "Certificate fingerprint"* ]]; then echo -e "${a#*:}"; continue; fi
KEYNUM=$((KEYNUM + 1))
echo -en "$KEYNUM: $a,$c,"
ALIASES[$KEYNUM]=$a
done < <(echo -e "$RESULTS")
echo -e "Extract key #: "
read -ei "1"
if [ $REPLY -ge 1 ] 2> /dev/null && [ $REPLY -le $KEYNUM ] 2> /dev/null ; then
techo "Extracting key [$REPLY]"
else
techo "Invalid key number entered, aborting."
exit 1
fi
SRCALIAS=${ALIASES[$REPLY]}
debugecho "ALIASES: [${ALIASES[*]}]"
debugecho "SRCALIAS: [$SRCALIAS]"
#extract the key, because keytool and JKS suck, convert to PKCS12 first, then let script continue on...
techo "Converting JKS to PKCS12"
echo -e "JKS keystore password: "
read -se PASSWORD
#append alias name to file name for uniqueness
P12FILE=${KEYFILE%.*}-$SRCALIAS.p12
keytool -importkeystore -srckeystore $KEYFILE -destkeystore $P12FILE -deststoretype PKCS12 -srcalias "$SRCALIAS" -srcstorepass "$PASSWORD" -deststorepass "$PASSWORD"
if [ $? -ne 0 ]; then techo "JKS conversion failed. Aborting."; exit 1; fi
PASSWORD=""
#change type and input name, so script can carry on as if a PKCS12 file was provided.
TYPE="p12"
KEYFILE=$P12FILE
fi
#generate output file name
OUTFILE=${KEYFILE%.*}_decr.key
debugecho "OUTFILE: [$OUTFILE]"
if [ $TYPE == p12 ]; then
# extract private key from pkcs12 format file
techo "Extracting key from PKCS12 file"
#openssl pkcs12 -in $KEYFILE -out $OUTFILE -nocerts -nodes 2> /dev/null
openssl pkcs12 -in $KEYFILE -out $OUTFILE -clcerts -nodes $STDIN 2> /dev/null
RESULT=$?
if [ $RESULT -ne 0 ]; then
techo "*** FATAL: Couldn't extract private key from PKCS12 file $KEYFILE"
exit 1
fi
KEYFILE=$OUTFILE
TYPE=pem
fi
if [ $TYPE == der ]; then
# convert DER to PEM
echo -e "Converting DER to PEM..."
openssl rsa -inform $TYPE -outform PEM -in $KEYFILE -out $OUTFILE 2> /dev/null
RESULT=$?
if [ $RESULT -ne 0 ]; then
techo "*** FATAL: Couldn't convert DER to PEM"
exit 1
fi
KEYFILE=$OUTFILE
TYPE=pem
fi
EXPIRY=""
#if present, examine certificate details and extract expiration date.
if [ -r ${KEYFILE%.*}.crt ]; then
techo "Checking for certificate ${KEYFILE%.*}.crt"
EXPIRY=`openssl x509 -noout -enddate -in ${KEYFILE%.*}.crt`
RESULT=$?
CN=`openssl x509 -noout -subject -nameopt oneline -in ${KEYFILE%.*}.crt`
debugecho "CN: [$CN]"
elif [ $TYPE == "pem" ]; then
techo "Checking PEM $KEYFILE for included certificate"
EXPIRY=`openssl x509 -noout -enddate -in ${KEYFILE}`
CN=`openssl x509 -noout -subject -nameopt oneline -in ${KEYFILE}`
RESULT=$?
fi
if [ $RESULT -eq 0 ]; then
#have an expiry date to use
EXPIRY=${EXPIRY%%.*\=}
debugecho "EXPIRY: [$EXPIRY]"
EXPDATE=`date -d "${EXPIRY##*=}" +%C%y%m%d`
debugecho "EXPDATE: [$EXPDATE]"
EXPDATE="EXP-$EXPDATE"
#grab CN
CN=${CN##*CN = }
debugecho "CN: [$CN]"
if [ ! "$CN" == "" ]; then
CN="CN-$CN"
else
CN="${KEYFILE%.*}"
fi
OUTFILE="${EXPDATE}-${CN}.${KEYFILE%.*}_decr.key"
debugecho "KEYFILE: [$KEYFILE] OUTFILE: [$OUTFILE]"
fi
techo "Validating key file: $KEYFILE"
# check if it's valid using openssl
# check with a hopefully incorrect password being passed to see if it's encrypted or not, if it is the wrong password will fail, if not it'll work silently. In the odd case it is encrypted and we've got the right password it'll succeed silently, and be reported as unencrypted.
openssl rsa -check -inform $TYPE -in $KEYFILE -noout -passin pass:dummy972345uofugsoyy8wtpassword 2> /dev/null
RETURN=$?
if [ $RETURN -ne 0 ]; then
# check without a fake password, if it's encrypted the user will be prompted, otherwise it's an invalid key/wrong format etc.
techo "Key may be encrypted."
openssl rsa -check -inform $TYPE -in $KEYFILE -noout 2> /dev/null
RETURN=$?
if [ $RETURN -ne 0 ]; then
techo "*** FATAL: $KEYFILE invalid (not RSA), wrong format (not PEM) or wrong password."
exit 1
fi
techo "$KEYFILE valid, but encrypted."
echo -n "Decrypt it? (yes|NO) "
read YNO
case $YNO in
[yY] | [yY][Ee][Ss] )
# output a decrypted key
OUTFILE=${KEYFILE%.*}_decr.key
openssl rsa -in $KEYFILE -outform PEM -out $OUTFILE
RETURN=$?
if [ $RETURN -ne 0 ]; then
techo "*** FATAL: Couldn't decrypt key. Wrong password?"
exit 1
fi
KEYFILE=$OUTFILE
techo "New key file: $KEYFILE ready to install to AMD, use rtm_install_key.sh"
exit 0
;;
*)
techo "Not decrypting key, kpadmin will be needed to load the key into the AMD."
exit 0
;;
esac
fi
techo "Complete. Saved: $KEYFILE"
exit 0
| cvidler/rtm_install_key | key_convert.sh | Shell | bsd-3-clause | 7,164 |
#!/bin/bash
# From: https://circleci.com/docs/1.0/nightly-builds/
_project=$1
_branch=$2
_circle_token=$3
trigger_build_url=https://circleci.com/api/v1.1/project/github/${_project}/tree/${_branch}?circle-token=${_circle_token}
post_data=$(cat <<EOF
{
"build_parameters": {
"TRIGGERED_BUILD": "true"
}
}
EOF
)
curl \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
--data "${post_data}" \
--request POST "${trigger_build_url}"
| AlexeyRaga/eta | utils/scripts/circleci-trigger.sh | Shell | bsd-3-clause | 470 |
#!/usr/bin/env bash
set -e # stop immediately on error
# preprocessing of a macaque structural image
# 1. brain extraction
# 2. bias correction
# 3. reference registration
# these steps are dependent on each other and could therefore be repeated for
# the best results
# TODO: calculate the flirt cost based on the brain extracted image
# ------------------------------ #
# usage
# ------------------------------ #
usage() {
cat <<EOF
Preprocess macaque structural MRI. Brain extraction, bias correction, and
reference registration.
example:
struct_macaque.sh --subjdir=MAC1 --all
struct_macaque.sh --subjdir=MAC1 --once
struct_macaque.sh --subjdir=MAC1 --structImg=struct/struct --betorig --biascorr
usage: struct_macaque.sh
instructions:
[--all] : execute all instructions, twice: --robustfov --betorig --biascorr
--betrestore --register --brainmask --biascorr --register --brainmask
--segment
[--once] : execute all instructions once: --robustfov --betorig --biascorr
--betrestore --register --brainmask --segment
[--robustfov] : robust field-of-view cropping
[--betorig] : rough brain extraction of the original structural
[--betrestore] : brain extraction of the restored structural
[--biascorr] : correct the spatial bias in signal intensity
[--register] : register to the reference and warp the refMask back
[--brainmask] : retrieve the brain mask from the reference and polish
[--segment] : segment the structural image in CSF, GM, and WM compartments
[--hemimask] : create masks for each hemisphere (left/right)
settings:
[--subjdir=<subject dir>] default: <current directory>
[--structdir=<structural dir>] default: <subjdir>/struct
[--structimg=<structural image>] default: <structdir>/struct
the <structdir> can be inferred from <structImg>, if provided
[--structmask=<structural brain mask>] default: <structimg>_brain_mask
[--transdir=<transform dir>] default: <subjdir>/transform
[--scriptdir=<script dir>] default: <parent directory of struct_macaque.sh>
path to bet_macaque.sh and robustfov_macaque.sh scripts
[--refdir=<reference dir>] default: <inferred from refimg, or scriptdir>
path to reference images
[--fovmm=<XSIZExYSIZExZSIZE> default: 128x128x64
field-of-view in mm, for robustfov_macaque
[--config=<fnirt config file> default: <scriptdir>/fnirt_1mm.cnf
[--refspace=<reference space name>] default: F99, alternative: SL, MNI
[--refimg=<ref template image>] default: <scriptdir>/<refspace>/McLaren
[--refmask=<reference brain mask>] default: <refimg>_brain_mask
[--refweightflirt=<ref weights for flirt>] default <refmask>
[--refmaskfnirt=<ref brain mask for fnirt>] default <refmask>
[--flirtoptions]=<extra options for flirt>] default none
EOF
}
# ------------------------------ #
# process and test the input arguments
# ------------------------------ #
# if no arguments given, or help is requested, return the usage
[[ $# -eq 0 ]] || [[ $@ =~ --help ]] && usage && exit 0
# if not given, retrieve directory of this script
[[ $0 == */* ]] && thisscript=$0 || thisscript="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"/$0
# if "--all" is given, run the default set
if [[ $@ =~ --all$ ]] || [[ $@ =~ "--all " ]] ; then
# the default arguments associated with "--all" / "--nonlin"
defaultset="--robustfov --betorig --biascorr --betrestore --register --brainmask --biascorr --register --brainmask --hemimask --segment"
echo "running the complete set of instructions: $defaultset"
# replace "--all" with the default argument set
newargs=$(echo "${@//--all/$defaultset}")
# execute this script with the default argument set, and passing others
sh $thisscript $newargs
exit 0
elif [[ $@ =~ --once$ ]] || [[ $@ =~ "--once " ]] ; then
# the default arguments associated with "--all" / "--nonlin"
defaultset="--robustfov --betorig --biascorr --betrestore --register --brainmask --hemimask --segment"
echo "running the complete set of instructions: $defaultset"
# replace "--once" with the default argument set
newargs=$(echo "${@//--once/$defaultset}")
# execute this script with the default argument set, and passing others
sh $thisscript $newargs
exit 0
fi
# run each instruction on its own (with the same definitions)
definitionargs=$(echo "$@" | tr " " "\n" | grep '=') || true
instructargs=$(echo "$@" | tr " " "\n" | grep -v '=') || true
if [[ $(echo "$instructargs" | wc -w) -gt 1 ]] ; then
# this ensures the instructions are executed as specified, not as coded
for instr in $instructargs ; do
sh $thisscript $definitionargs $instr
done
exit 0
fi
# count and grep the number of argument repetitions (ignoring after "=")
duplicates=$(echo "$@" | tr " " "\n" | awk '{ gsub("=.*","="); print $0}' | sort | uniq -c | grep -v '^ *1 ') || true # "|| true" is added to ignore the non-zero exit code of grep (and avoid the script the stop because of "set -e")
# now test if any duplicates were found, and if so, give an error
[[ -n $duplicates ]] && >&2 echo -e "\nError, repetitions found in the arguments:\n$@\n${duplicates}\n" && exit 1
# ------------------------------ #
# arguments and defaults
# ------------------------------ #
# set defaults
instr=""
subjDir="."
structImg="struct"
structMask=""
[[ -n $MRCATDIR ]] && scriptDir=$MRCATDIR/core &&
echo "Script directory: $scriptDir"
hcpDir="" # legacy, will give warning when set and will be ignored
refDir=$MRCATDIR/data/macaque
transDir="transform"
fovmm="128 128 64"
config="fnirt_1mm.cnf"
refSpace="F99"
refImg="McLaren"
flirtoptions=""
# parse the input arguments
for a in "$@" ; do
case $a in
--subjdir=*) subjDir="${a#*=}"; shift ;;
--structdir=*) structDir="${a#*=}"; shift ;;
--structimg=*) structImg="${a#*=}"; shift ;;
--structmask=*) structMask="${a#*=}"; shift ;;
--transdir=*) transDir="${a#*=}"; shift ;;
--scriptdir=*) scriptDir="${a#*=}"; shift ;;
--hcpdir=*) hcpDir="${a#*=}"; shift ;; # legacy, will give warning and will be ignored
--refdir=*) refDir="${a#*=}"; shift ;;
--fovmm=*) fovmm="${a#*=}"; shift ;;
--config=*) config="${a#*=}"; shift ;;
--refspace=*) refSpace="${a#*=}"; shift ;;
--refimg=*) refImg="${a#*=}"; shift ;;
--refmask=*) refMask="${a#*=}"; shift ;;
--refweightflirt=*) refweightflirt="${a#*=}"; shift ;;
--refmaskfnirt=*) refMaskfnirt="${a#*=}"; shift ;;
--flirtoptions=*) flirtoptions="${a#*=}"; shift ;;
*) instr="$instr $a"; shift ;; # instruction argument
esac
done
# split the fovmm argument, if given
fovmm=$(echo $fovmm | tr "x" " ")
# input dependent defaults
[[ -z $structDir ]] && structDir="${structImg%/*}"
[[ -d $structDir ]] && structDir="$(cd "$structDir" && pwd)"
[[ -z $structDir ]] && structDir="struct"
structImg=${structImg##*/} # remove the directory
structImg=${structImg%%.*} # remove the extension
structImg=${structImg%%_brain*} # remove "_brain" postpad
structImg=${structImg%%_restore*} # remove "_restore" postpad
structMask=${structMask%%.*} # remove the extension
spaceDir="$refSpace"
refImg=${refImg%%.*} # remove the extension
[[ -z $refMask ]] && refMask="${refImg}_brain_mask"
refMask=${refMask%%.*} # remove the extension
#baserefImg=${refImg##*/} # remove the directory
#if [[ ${baserefImg%%.*} == "McLaren" ]] ; then
# [[ -z $refweightflirt ]] && refweightflirt="$refImg"
# [[ -z $refMaskfnirt ]] && refMaskfnirt="${refMask}_strict"
#fi
# sort the location of the different script directories
[[ -z $refweightflirt ]] && refweightflirt="$refMask"
[[ -z $refMaskfnirt ]] && refMaskfnirt="$refMask"
[[ -z $scriptDir ]] && scriptDir="$(cd "$(dirname ${BASH_SOURCE[0]})"/.. && pwd)"
[[ -n $hcpDir ]] && printf '\n\nWarning: The input argument --hcpdir is no longer valid and will be ignored.\n\n' # legacy, will give warning and will be ignored
[[ -z $refDir ]] && refDir=$(cd $MRCATDIR/data/macaque && pwd)
[[ ! -d $refDir ]] && refDir=$scriptDir
# prepad the directory if none is given
[[ $config != */* ]] && config=$MRCATDIR/config/$config
[[ $structDir != */* ]] && structDir=$subjDir/$structDir
[[ $spaceDir != */* ]] && spaceDir=$subjDir/$spaceDir
[[ $transDir != */* ]] && transDir=$subjDir/$transDir
[[ $refImg != */* ]] && refImg=$refDir/$refSpace/$refImg
[[ $refMask != */* ]] && refMask=$refDir/$refSpace/$refMask
[[ $refweightflirt != */* ]] && refweightflirt=$refDir/$refSpace/$refweightflirt
[[ $refMaskfnirt != */* ]] && refMaskfnirt=$refDir/$refSpace/$refMaskfnirt
# ------------------------------ #
# the instructions are coded below
# ------------------------------ #
# first rough brain extraction
if [[ $instr =~ --robustfov$ ]] ; then
# input: original structImg
# output: (cropped) structImg with robust field-of-view
# call robustfov_macaque.sh to ensure a robust field-of-view
$scriptDir/robustfov_macaque.sh $structDir/$structImg -m $fovmm -f
fi
# first rough brain extraction
if [[ $instr =~ --betorig$ ]] || [[ $instr =~ --betrestore$ ]] ; then
# input: original or restored structImg
# output: {structImg}_brain_mask
# definitions
if [[ $instr =~ --betorig$ ]] ; then
img=$structDir/$structImg
fbrain=0.2
niter=3
else
img=$structDir/${structImg}_restore
fbrain=0.25
niter=10
fi
base=$structDir/$structImg
[[ -z $structMask ]] && structMask=${base}_brain_mask
# call bet_macaque.sh for an initial brain extraction
$scriptDir/bet_macaque.sh $img $base --fbrain $fbrain --niter $niter
# remove old brain extractions, and create new ones
imrm ${base}_brain ${img}_brain
[[ -r ${base}.nii.gz ]] && fslmaths $base -mas $structMask ${base}_brain
[[ -r ${img}.nii.gz ]] && fslmaths $img -mas $structMask ${img}_brain
# copy the brain mask for later inspection
imcp $structMask ${structMask}_bet
fi
# bias correct the corrected image
if [[ $instr =~ --biascorr$ ]] ; then
# input: structImg
# output: {structImg}_restore
base=$structDir/${structImg}
[[ -z $structMask ]] && structMask=${base}_brain_mask
echo "bias correcting image: $base"
# ignore dark voxels
thr=$(fslstats ${base}_brain -P 5)
cluster --in=${base}_brain --thresh=$thr --no_table --connectivity=6 --minextent=10000 --oindex=${structMask}_biascorr
# and the super bright
thr=$(fslstats ${base}_brain -P 99.8)
fslmaths ${base}_brain -uthr $thr -mas ${structMask}_biascorr -bin ${structMask}_biascorr
# smoothness definitions
sigma=3
FWHM=$(echo "2.3548 * $sigma" | bc)
# run RobustBiasCorr
$MRCATDIR/core/RobustBiasCorr.sh \
--in=$base \
--workingdir=$structDir/biascorr \
--brainmask=${structMask}_biascorr \
--basename=struct \
--FWHM=$FWHM \
--type=1 \
--forcestrictbrainmask="FALSE" --ignorecsf="FALSE"
# copy the restored image and bias field, and remove working directory
imcp $structDir/biascorr/struct_restore ${base}_restore
imcp $structDir/biascorr/struct_bias ${base}_bias
rm -rf $structDir/biascorr
# clean up
imrm ${structMask}_biascorr
echo " done"
fi
# reference registration
if [[ $instr =~ --register$ ]] ; then
base=$structDir/${structImg}
[[ -z $structMask ]] && structMask=${base}_brain_mask
echo "register ${base}_restore to reference: $refImg"
# ensure the reference and transformation directories exist
mkdir -p $spaceDir
mkdir -p $transDir
# perform linear registration of the structural to reference
echo " linear registration"
flirt -dof 12 -ref $refImg -refweight $refweightflirt -in ${base}_restore -inweight $structMask -omat $transDir/${structImg}_to_${refSpace}.mat $flirtoptions
# check cost of this registration
cost=$(flirt -ref $refImg -in ${base}_restore -schedule $FSLDIR/etc/flirtsch/measurecost1.sch -init $transDir/${structImg}_to_${refSpace}.mat | head -1 | cut -d' ' -f1)
# decide if flirt is good enough or needs another try
if [[ $(echo $cost | awk '($1>0.9){print 1}') ]] ; then
echo " registration is poor: the cost is $cost"
echo " for reference, a value of 0.8 or lower would be nice"
echo " rerunning linear registration with restricted search"
# see if the original flirt was run without search
if [[ $flirtoptions =~ -nosearch ]] ; then
# remove the -nosearch option, but use a restricted schedule (simple3D)
flirt -dof 12 -ref $refImg -refweight $refweightflirt -in ${base}_restore -inweight $structMask -omat $transDir/${structImg}_to_${refSpace}_restricted.mat -schedule $FSLDIR/etc/flirtsch/simple3D.sch ${flirtoptions//-nosearch/}
else
# run flirt without search
flirt -dof 12 -ref $refImg -refweight $refweightflirt -in ${base}_restore -inweight $structMask -omat $transDir/${structImg}_to_${refSpace}_restricted.mat -nosearch $flirtoptions
fi
# calculate cost of restricted registration
costrestr=$(flirt -ref $refImg -in ${base}_restore -schedule $FSLDIR/etc/flirtsch/measurecost1.sch -init $transDir/${structImg}_to_${refSpace}_restricted.mat | head -1 | cut -d' ' -f1)
# check if the new registration is actually better
echo " restricted registration cost is $costrestr"
if [[ $(echo $cost $costrestr | awk '($1<$2){print 1}') ]] ; then
# reject new registration
echo " keeping original registration, but please be warned of poor results"
rm -rf $transDir/${structImg}_to_${refSpace}_restricted.mat
else
if [[ $(echo $costrestr | awk '($1>0.9){print 1}') ]] ; then
echo " continuing, but please be warned of poor registration results"
else
echo " restricted registration is accepted"
fi
# use new registration
mv -f $transDir/${structImg}_to_${refSpace}_restricted.mat $transDir/${structImg}_to_${refSpace}.mat
fi
else
echo " the linear registration cost is $cost"
fi
# invert linear transformation
convert_xfm -omat $transDir/${refSpace}_to_${structImg}.mat -inverse $transDir/${structImg}_to_${refSpace}.mat
# use spline interpolation to apply the linear transformation matrix
applywarp --rel --interp=spline -i ${base}_restore -r $refImg --premat=$transDir/${structImg}_to_${refSpace}.mat -o $spaceDir/${structImg}_restore_lin
fslmaths $spaceDir/${structImg}_restore_lin -thr 0 $spaceDir/${structImg}_restore_lin
applywarp --rel --interp=nn -i $refMask -r $base --premat=$transDir/${refSpace}_to_${structImg}.mat -o ${structMask}_${refSpace}lin
# now perform non-linear registration
echo " non-linear registration"
#fnirt --ref=$refImg --refmask=$refMaskfnirt --in=${base}_restore --inmask=$structMask --aff=$transDir/${structImg}_to_${refSpace}.mat --fout=$transDir/${structImg}_to_${refSpace}_warp --config=$config
fnirt --ref=$refImg --refmask=$refMaskfnirt --in=${base}_restore --aff=$transDir/${structImg}_to_${refSpace}.mat --fout=$transDir/${structImg}_to_${refSpace}_warp --config=$config
# use spline interpolation to apply the warp field
echo " applying and inverting warp"
applywarp --rel --interp=spline -i ${base}_restore -r $refImg -w $transDir/${structImg}_to_${refSpace}_warp -o $spaceDir/${structImg}_restore
fslmaths $spaceDir/${structImg}_restore -thr 0 $spaceDir/${structImg}_restore
# invert the warp field
invwarp -w $transDir/${structImg}_to_${refSpace}_warp -o $transDir/${refSpace}_to_${structImg}_warp -r ${base}
# and ditch the warp coefficient and log
rm -f ${base}*warpcoef*
mv -f ${base}*to_*.log $transDir/
echo " done"
fi
# retrieve and polish the brain mask
if [[ $instr =~ --brainmask$ ]] ; then
# input: {structImg}_restore, {structImg}_brain_mask
# output: {structImg}_brain_mask
base=$structDir/${structImg}
[[ -z $structMask ]] && structMask=${base}_brain_mask
echo "retrieve and polish the brain mask based on: $refImg"
# warp the brain mask from reference to struct
applywarp --rel --interp=nn -i $refMask -r $base -w $transDir/${refSpace}_to_${structImg}_warp -o $structMask
imcp $structMask ${structMask}_$refSpace
# smooth out the brain mask (and just ever so slightly dilate)
fslmaths $structMask -s 1 -thr 0.45 -bin $structMask
# extract the brain
fslmaths ${base}_restore -mas $structMask ${base}_brain
# remove old brain extractions, and create new ones
imrm ${base}_brain ${base}_restore_brain
[[ -r ${base}.nii.gz ]] && fslmaths $base -mas $structMask ${base}_brain
[[ -r ${base}_restore.nii.gz ]] && fslmaths ${base}_restore -mas $structMask ${base}_restore_brain
# and make a strict mask
thr=$(fslstats ${base}_brain -P 5)
cluster --in=${base}_brain --thresh=$thr --no_table --connectivity=6 --minextent=10000 --oindex=${structMask}_strict
fslmaths ${structMask}_strict -bin -fillh -s 0.5 -thr 0.5 -bin -mas $structMask -fillh ${structMask}_strict
echo " done"
fi
# segment
if [[ $instr =~ --segment$ ]] ; then
# input: {structImg}_restore_brain OR {structImg}_restore, {structImg}_brain_mask
# output: CSF GM GMcore GMall WM WMcore
base=$structDir/${structImg}
[[ -z $structMask ]] && structMask=${base}_brain_mask
echo "segment the structural in CSF, GM, WM compartments"
# pick the best image to work on
flgBias="--nobias"
workImg=${base}_restore_brain
if [[ ! -r $workImg.nii.gz ]] ; then
[[ -r ${base}_restore.nii.gz ]] && workImg=${base}_restore || { workImg=${base}; flgBias=""; }
if [[ -r ${workImg}_brain.nii.gz ]] ; then
workImg=${workImg}_brain
elif [[ -r $structMask.nii.gz ]] ; then
fslmaths $workImg -mas $structMask ${workImg}_brain
workImg=${workImg}_brain
else
>&2 echo "please provide a brain extracted structural image, or provide a brain mask"
exit 1
fi
fi
# definitions
workDir=$(mktemp -d "$structDir/tmp_segment.XXXXXXXXXX")
mkdir -p $workDir
structName=struct_brain
# segment (but priors don't seem to help fast)
echo " running fast to segment the structural (ignoring priors)"
fast --class=3 --type=1 --segments $flgBias -p --out=$workDir/$structName $workImg
echo " polishing compartments based on posterior probability"
# identify segments
median0=$(fslstats $workImg -k $workDir/${structName}_seg_0 -P 50)
median1=$(fslstats $workImg -k $workDir/${structName}_seg_1 -P 50)
median2=$(fslstats $workImg -k $workDir/${structName}_seg_2 -P 50)
idx=$(echo $median0 $median1 $median2 | tr " " "\n" | nl -v0 | sort -nrk2 | awk '{print $1}')
iWM=$(echo "$idx" | awk 'NR==1{print $1}')
iGM=$(echo "$idx" | awk 'NR==2{print $1}')
iCSF=$(echo "$idx" | awk 'NR==3{print $1}')
# keep only the larger contiguous cluster as a WM mask
cluster --in=$workDir/${structName}_pve_$iWM --thresh=1 --no_table --minextent=10000 --oindex=$workDir/WM
# keep only the high probability CSF voxels and exclude any WM voxels
if [[ -r ${structMask}_strict.nii.gz ]] ; then
# anything outside the strict brain mask is CSF
fslmaths ${structMask} -sub ${structMask}_strict -add $workDir/${structName}_pve_$iCSF -thr 1 -bin -sub $workDir/WM -bin $workDir/CSF
else
fslmaths $workDir/${structName}_pve_$iCSF -thr 1 -bin -sub $workDir/WM -bin $workDir/CSF
fi
# GMall is the inverse of CSF+WM, within the brain mask
if [[ -r $structMask.nii.gz ]] ; then
fslmaths $workDir/WM -add $workDir/CSF -binv -mas $structMask $workDir/GMall
else
fslmaths $workDir/WM -add $workDir/CSF -binv -mas $workDir/${structName}_seg $workDir/GMall
fi
# make a mask where we are more sure of the GM
fslmaths $workDir/${structName}_pve_$iGM -s 1 -thr 0.5 -bin -mas $workDir/GMall $workDir/GM
# Place for HACK
# define priors for compartments and subcortical structures based on reference
warp=$transDir/${refSpace}_to_${structImg}_warp
refSubCortMask=$MRCATDIR/data/macaque/$refSpace/subcortMask
refCSF=${refImg}_CSF
refGM=${refImg}_GM
refWM=${refImg}_WM
# try to retrieve a subcortical atlas and assign those structures to GM and GMall, removing them from CSF and WM
if [[ -r $refSubCortMask.nii.gz ]] && [[ -r $warp.nii.gz ]] ; then
# warp subcortical atlas from reference space to structural space
echo " warping subcortical atlas from the reference template to the structural"
if [[ -r ${structMask}.nii.gz ]] ; then
applywarp --rel --interp=nn -i $refSubCortMask -r $workImg -m $structMask -w $warp -o $workDir/subcortMask
else
applywarp --rel --interp=nn -i $refSubCortMask -r $workImg -m $workDir/${structName}_seg -w $warp -o $workDir/subcortMask
fi
# add subcortical structures to the GM and GMall masks
fslmaths $workDir/GM -add $workDir/subcortMask -bin $workDir/GM
fslmaths $workDir/GMall -add $workDir/subcortMask -bin $workDir/GMall
# exclude subcortical structures from the WM and CSF masks
fslmaths $workDir/WM -bin -sub $workDir/subcortMask -bin $workDir/WM
fslmaths $workDir/CSF -bin -sub $workDir/subcortMask -bin $workDir/CSF
else
echo " missing subcortical atlas or warp field, continuing without"
fi
# try to use compartment priors from the reference image to define compartment cores
if [[ -r $refCSF.nii.gz ]] && [[ -r $refGM.nii.gz ]] && [[ -r $refWM.nii.gz ]] && [[ -r $warp.nii.gz ]] ; then
echo " warping prior probability maps from the reference template to the structural"
# loop over reference compartment priors to warp
for refPrior in $refCSF $refGM $refWM ; do
basePrior=$(basename $refPrior)
# warp prior from reference space to structural space
if [[ -r ${structMask}.nii.gz ]] ; then
applywarp --rel --interp=spline -i $refPrior -r $workImg -m $structMask -w $warp -o $workDir/$basePrior
else
applywarp --rel --interp=spline -i $refPrior -r $workImg -w $warp -o $workDir/$basePrior
fi
fslmaths $workDir/$basePrior -thr 0 $workDir/$basePrior
done
# polish using priors
echo " create secondary compartments, masked by prior probability"
priorCSF=$workDir/$(basename $refCSF)
fslmaths $priorCSF -thr 0.3 -bin -mas $workDir/CSF $workDir/CSFcore
priorGM=$workDir/$(basename $refGM)
fslmaths $priorGM -thr 0.4 -bin -mas $workDir/GMall $workDir/GMcore
priorWM=$workDir/$(basename $refWM)
fslmaths $priorWM -thr 0.5 -bin -mas $workDir/WM $workDir/WMcore
# copy relevant images from workDir to structDir
imcp $workDir/CSFcore $workDir/GMcore $workDir/WMcore $structDir/
else
echo " missing reference compartment priors or warp field, continuing without"
fi
echo " eroding WM and CSF masks in structural space"
# erode the WM and CSF
voxSize=$(fslval $workDir/WM pixdim1 | awk '{val=100*$0; printf("%d\n", val+=val<0?-0.5:0.5)}')
if [[ $voxSize -lt 55 ]] ; then
fslmaths $workDir/WM -ero $workDir/WMero
fslmaths $workDir/WMero -ero $workDir/WMero2
fslmaths $workDir/GM -ero $workDir/GMero
fslmaths $structMask -binv -add $workDir/CSF -bin -ero -mas $structMask $workDir/CSFero
if [[ -f $workDir/WMcore.nii.gz ]] ; then
fslmaths $workDir/WMero -mas $workDir/WMcore $workDir/WMeroCore
fi
else
fslmaths $workDir/WM -s 1 -thr 0.8 -bin -mas $workDir/WM $workDir/WMero
fslmaths $workDir/WM -s 1 -thr 0.9 -bin -mas $workDir/WM $workDir/WMero2
fslmaths $workDir/GM -s 1 -thr 0.8 -bin -mas $workDir/GM $workDir/GMero
fslmaths $structMask -binv -add $workDir/CSF -s 1 -thr 0.7 -bin -mas $workDir/CSF $workDir/CSFero
fi
# copy relevant images from workDir to structDir
imcp $workDir/CSF $workDir/GM $workDir/WM $workDir/CSFero $workDir/GMero $workDir/WMero $workDir/WMero2 $workDir/GMall $structDir/
[[ -f $workDir/subcortMask.nii.gz ]] && imcp $workDir/subcortMask $structDir/
[[ -f $workDir/WMeroCore.nii.gz ]] && imcp $workDir/WMeroCore $structDir/
# clean up
rm -rf $workDir
echo " done"
fi
# hemimask
if [[ $instr =~ --hemimask$ ]] ; then
base=$structDir/${structImg}
echo "create hemisphere masks"
# specify left and right hemisphere masks in reference space
refMaskLeft=${refImg}_left_mask
refMaskRight=${refImg}_right_mask
# create these masks if they don't yet exist
if [[ ! -r $refMaskLeft.nii.gz ]] || [[ ! -r $refMaskRight.nii.gz ]] ; then
# make a mask with ones over the whole image
fslmaths $refMask -mul 0 -add 1 $refMaskRight
# find the cut between left and right
voxCut=$(fslval $refMask "dim1" | awk '{print $1/2}' | awk '{print int($1)}')
# split the full-image mask into left and right halves at the cut
fslmaths $refMaskRight -roi 0 $voxCut 0 -1 0 -1 0 -1 $refMaskLeft
fslmaths $refMaskRight -roi $voxCut -1 0 -1 0 -1 0 -1 $refMaskRight
fi
# warp the hemisphere masks from reference to struct
applywarp --rel --interp=nn -i $refMaskLeft -r $base -w $transDir/${refSpace}_to_${structImg}_warp -o $structDir/left
applywarp --rel --interp=nn -i $refMaskRight -r $base -w $transDir/${refSpace}_to_${structImg}_warp -o $structDir/right
echo " done"
fi
| neuroecology/MrCat | core/struct_macaque.sh | Shell | bsd-3-clause | 24,953 |
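# Benchmark driver: load the movies dataset into TerarkDB (fillrandom), then run readrandom single-threaded and with 8 and 16 threads, while dstat logs system usage to a trace file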
nohup dstat -tcm --output /home/panfengfeng/trace_log/in-memory/movies/readrandom_multi_terark_index_256_old 2 > nohup.out &
file=/data/publicdata/movies/movies.txt
record_num=7911684
read_num=7911684
dirname=/mnt/datamemory
rm -rf $dirname/*
export TMPDIR=$dirname
cp ../../terarkschema/dbmeta_movies_index_old.json $dirname/dbmeta.json
echo "####Now, running terarkdb benchmark"
echo 3 > /proc/sys/vm/drop_caches
free -m
date
export TerarkDb_WrSegCacheSizeMB=256
../../db_movies_terark_index --benchmarks=fillrandom --num=$record_num --reads=$read_num --sync_index=0 --db=$dirname --resource_data=$file
free -m
date
du -s -b $dirname
echo "####terarkdb benchmark finish"
echo "####Now, running terarkdb benchmark"
export TMPDIR=$dirname
echo 3 > /proc/sys/vm/drop_caches
free -m
date
export TerarkDb_WrSegCacheSizeMB=256
../../db_movies_terark_index --benchmarks=readrandom --num=$record_num --reads=$read_num --sync_index=0 --db=$dirname --resource_data=$file
free -m
date
echo "####terarkdb benchmark finish"
du -s -b $dirname
echo "####Now, running terarkdb benchmark"
export TMPDIR=$dirname
echo 3 > /proc/sys/vm/drop_caches
free -m
date
export TerarkDb_WrSegCacheSizeMB=256
../../db_movies_terark_index --benchmarks=readrandom --num=$record_num --reads=$read_num --sync_index=0 --db=$dirname --threads=8 --resource_data=$file
free -m
date
echo "####terarkdb benchmark finish"
du -s -b $dirname
echo "####Now, running terarkdb benchmark"
export TMPDIR=$dirname
echo 3 > /proc/sys/vm/drop_caches
free -m
date
export TerarkDb_WrSegCacheSizeMB=256
../../db_movies_terark_index --benchmarks=readrandom --num=$record_num --reads=$read_num --sync_index=0 --db=$dirname --threads=16 --resource_data=$file
free -m
date
echo "####terarkdb benchmark finish"
du -s -b $dirname
dstatpid=`ps aux | grep dstat | awk '{if($0 !~ "grep"){print $2}}'`
for i in $dstatpid
do
kill -9 $i
done
| panfengfeng/leveldb_nark | in-memory-bench/read/test_movies_terark_index_old.sh | Shell | bsd-3-clause | 1,893 |
#!/bin/sh
make clean
${TERMITE_DIR}/termite-make -j200
make clean
| hanwen/termite | test/open-files2/test.sh | Shell | bsd-3-clause | 69 |
#!/bin/bash
bash $RECIPE_DIR/prepare.bash
$PYTHON -m pip install . --no-deps --ignore-installed --no-cache-dir -vvv
| jjhelmus/berryconda | recipes/ruamel_yaml/build.sh | Shell | bsd-3-clause | 117 |
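# Build DSPInsert2 with profiling, collect a time profile and -hc/-hd/-hy heap profiles into DSPInsert/02/, then rebuild without profiling and time a plain run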
ghc -outputdir='.ghc_garbage' --make -package-db=../../cabal-dev/packages-7.6.2.conf/ DSPInsert2.hs -O2 -rtsopts -prof -auto-all -caf-all -fforce-recomp
./DSPInsert2 +RTS -p
mv DSPInsert2.prof DSPInsert/02/prof.prof
./DSPInsert2 +RTS -i0.00002 -hc -p
mv DSPInsert2.hp DSPInsert/02/hc.hp
hp2ps -e8in -c DSPInsert/02/hc.hp
mv hc.ps DSPInsert/02/hc.ps
./DSPInsert2 +RTS -i0.00002 -hd -p
mv DSPInsert2.hp DSPInsert/02/hd.hp
hp2ps -e8in -c DSPInsert/02/hd.hp
mv hd.ps DSPInsert/02/hd.ps
./DSPInsert2 +RTS -i0.00002 -hy -p
mv DSPInsert2.hp DSPInsert/02/hy.hp
hp2ps -e8in -c DSPInsert/02/hy.hp
mv hy.ps DSPInsert/02/hy.ps
rm *.aux
./DSPInsert2 +RTS -sstderr &> DSPInsert/02/s.out
ghc -outputdir='.ghc_garbage' --make -package-db=../../cabal-dev/packages-7.6.2.conf/ DSPInsert2.hs -O2 -fforce-recomp
time ./DSPInsert2
| Palmik/data-store | benchmarks/src/DSPInsert2_run.sh | Shell | bsd-3-clause | 820 |
#!/bin/bash
echo "Testing DefectDojo Service"
echo "Waiting max 60s for services to start"
# Wait for services to become available
COUNTER=0
while [ $COUNTER -lt 10 ]; do
curl -s -o "/dev/null" $DD_BASE_URL -m 120
CR=$(curl --insecure -s -m 10 -I "${DD_BASE_URL}login?next=/" | egrep "^HTTP" | cut -d' ' -f2)
if [ "$CR" == 200 ]; then
echo "Succesfully displayed login page, starting integration tests"
break
fi
echo "Waiting: cannot display login screen; got HTTP code $CR"
sleep 10
let COUNTER=COUNTER+1
done
if [ $COUNTER -ge 10 ]; then
echo "ERROR: cannot display login screen; got HTTP code $CR"
exit 1
fi
# Run available unittests with a simple setup
# All available integration test scripts are activated below
# If successful, a success message is printed and the script continues
# If any script is unsuccessful, a failure message is printed and the test script
# exits with status code 1
function fail() {
echo "Error: $* test failed"
exit 1
}
function success() {
echo "Success: $* test passed"
}
test="Report Builder tests"
echo "Running: $test"
if python3 tests/report_builder_test.py ; then
success $test
else
fail $test
fi
test="Notes integration tests"
echo "Running: $test"
if python3 tests/notes_test.py ; then
success $test
else
fail $test
fi
test="Regulation integration tests"
echo "Running: $test"
if python3 tests/regulations_test.py ; then
success $test
else
fail $test
fi
test="Product type integration tests"
echo "Running: $test"
if python3 tests/product_type_test.py ; then
success $test
else
fail $test
fi
test="Product integration tests"
echo "Running: $test"
if python3 tests/product_test.py ; then
success $test
else
fail $test
fi
test="Endpoint integration tests"
echo "Running: $test"
if python3 tests/endpoint_test.py ; then
success $test
else
fail $test
fi
test="Engagement integration tests"
echo "Running: $test"
if python3 tests/engagement_test.py ; then
success $test
else
fail $test
fi
test="Environment integration tests"
echo "Running: $test"
if python3 tests/environment_test.py ; then
success $test
else
fail $test
fi
test="Finding integration tests"
echo "Running: $test"
if python3 tests/finding_test.py ; then
success $test
else
fail $test
fi
test="Test integration tests"
echo "Running: $test"
if python3 tests/test_test.py ; then
success $test
else
fail $test
fi
test="User integration tests"
echo "Running: $test"
if python3 tests/user_test.py ; then
success $test
else
fail $test
fi
test="Group integration tests"
echo "Running: $test"
if python3 tests/group_test.py ; then
success $test
else
fail $test
fi
test="Product Group integration tests"
echo "Running: $test"
if python3 tests/product_group_test.py ; then
success $test
else
fail $test
fi
test="Product Type Group integration tests"
echo "Running: $test"
if python3 tests/product_type_group_test.py ; then
success $test
else
fail $test
fi
test="Product member integration tests"
echo "Running: $test"
if python3 tests/product_member_test.py ; then
success $test
else
fail $test
fi
test="Product type member integration tests"
echo "Running: $test"
if python3 tests/product_type_member_test.py ; then
success $test
else
fail $test
fi
test="Ibm Appscan integration test"
echo "Running: $test"
if python3 tests/ibm_appscan_test.py ; then
success $test
else
fail $test
fi
test="Search integration test"
echo "Running: $test"
if python3 tests/search_test.py ; then
success $test
else
fail $test
fi
test="File Upload tests"
echo "Running: $test"
if python3 tests/file_test.py ; then
success $test
else
fail $test
fi
test="Dedupe integration tests"
echo "Running: $test"
if python3 tests/dedupe_test.py ; then
success $test
else
fail $test
fi
echo "Check Various Pages integration test"
if python3 tests/check_various_pages.py ; then
echo "Success: Check Various Pages tests passed"
else
echo "Error: Check Various Pages test failed"; exit 1
fi
# The below tests are commented out because they are still an unstable work in progress
## Once Ready they can be uncommented.
# echo "Import Scanner integration test"
# if python3 tests/import_scanner_test.py ; then
# echo "Success: Import Scanner integration tests passed"
# else
# echo "Error: Import Scanner integration test failed"; exit 1
# fi
# echo "Zap integration test"
# if python3 tests/zap.py ; then
# echo "Success: zap integration tests passed"
# else
# echo "Error: Zap integration test failed"; exit 1
# fi
exec echo "Done Running all configured integration tests."
| rackerlabs/django-DefectDojo | docker/entrypoint-integration-tests.sh | Shell | bsd-3-clause | 4,722 |
#!/bin/sh
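# Query the TV's getRemoteControllerInfo service (using the auth cookie saved by auth.sh) and pretty-print the available IRCC codes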
set -e
cd $(dirname $0)
if [ "$1" = "" ]; then
echo "Usage: $0 <TV_IP>"
exit 1
fi
if [ -e 'auth_cookie' ]; then
read cookie < auth_cookie
curl --silent --cookie \"$cookie\" -XPOST http://$1/sony/system -d '{"method":"getRemoteControllerInfo","params":[],"id":10,"version":"1.0"}' | python -m json.tool
else
echo 'auth_cookie not found. Run ./auth.sh first.'
fi
| BenWoodford/bravia-auth-and-remote | print_ircc_codes.sh | Shell | isc | 387 |
#!/bin/sh
export NODE_ENV=test
export NODE_PATH=./src
node_modules/gulp/bin/gulp.js watch:phantom
| rachelmcquirk/hiking | bin/watch_phantom.sh | Shell | mit | 99 |
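# Trigger a Read the Docs build by POSTing an empty body to the project's build URL (2613 is presumably the project id)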
curl --data '' http://readthedocs.org/build/2613
| domidimi/ddt | rtdocs.sh | Shell | mit | 49 |
#!/bin/sh
#get the released plugin in npm registry
cordova plugin rm cordova-wheel-selector-plugin
cordova plugin add cordova-wheel-selector-plugin
| jasonmamy/cordova-wheel-selector-plugin | examples/testapp/link-remote.sh | Shell | mit | 149 |
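# Generate phpDocumentor API documentation for the sources in ./src/Vipa into ./docs/phpdoc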
phpdoc -d ./src/Vipa -t ./docs/phpdoc
| okulbilisim/ojs | docs/phpdoc/generate.sh | Shell | mit | 38 |
#!/bin/sh
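# Fetch Bootstrap 2.3.2 with Bower, move its Less sources into src/main/less, and compile bootstrap.less into the fixtures/bootstrap.css fixture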
bower install bootstrap#2.3.2
rm -rf src/main/jquery
mkdir -p src/main/less
mv src/main/bootstrap/less/* src/main/less
rm -rf src/main/bootstrap
lessc src/main/less/bootstrap.less fixtures/bootstrap.css
echo "Bootstrapped."
| sbt/less-sbt | src/sbt-test/less-sbt/old-bootstrap/bootstrap.sh | Shell | mit | 236 |
#!/bin/bash
# Remove munkireport script
rm -f "${MUNKIPATH}postflight.d/munkireport.py"
rm -f "${MUNKIPATH}preflight.d/munkireport.py"
# Remove munkireport.plist file
rm -f "${MUNKIPATH}postflight.d/cache/munkireport.plist"
rm -f "${MUNKIPATH}preflight.d/cache/munkireport.plist"
| childrss/munkireport-php | app/modules/munkireport/scripts/uninstall.sh | Shell | mit | 282 |
killall mongod
# remove the directories
rm -rf /data/rs1 /data/rs2 /data/rs3
# create them
mkdir -p /data/rs1 /data/rs2 /data/rs3
mongod --replSet m101 --logpath "1.log" --dbpath /data/rs1 --port 27017 --smallfiles --oplogSize 200 --fork
mongod --replSet m101 --logpath "2.log" --dbpath /data/rs2 --port 27018 --smallfiles --oplogSize 200 --fork
mongod --replSet m101 --logpath "3.log" --dbpath /data/rs3 --port 27019 --smallfiles --oplogSize 200 --fork
| italoag/M101P | Week6/lesson_files/start_replica_set.sh | Shell | mit | 456 |
#!/bin/bash
pylint --rcfile=.pylintrc ppp_datamodel_notation_parser
| ProjetPP/PPP-DatamodelNotationParser | run_pylint.sh | Shell | mit | 68 |
#!/usr/bin/env bash
while true
do
cargo test --color=always --package morpheus --bin morpheus relationship -- --nocapture
done
| shisoft/Morpheus | scripts/repeatdly_model_test.sh | Shell | epl-1.0 | 127 |
#!/bin/bash
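# Print the minor version number: read VERSION (or fall back to 0) and keep only the second field of a MAJOR.MINOR-N-... string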
(cat VERSION 2>/dev/null || echo 0) | sed 's/^\([0-9][0-9]*\)\.\([0-9][0-9]*\)-\([0-9][0-9]*\)-.*/\2/g' || echo 0
| lzxbill7/myleet | script/get_version_minor.sh | Shell | gpl-2.0 | 127 |
########################################################################
# Bug #983685: innodb_data_file_path is not written to backup-my.cnf
########################################################################
. inc/common.sh
start_server
options="innodb_data_file_path"
mkdir -p $topdir/backup
innobackupex $topdir/backup
backup_dir=`grep "Backup created in directory" $OUTFILE | awk -F\' '{ print $2}'`
vlog "Backup created in directory $backup_dir"
# test presence of options
for option in $options ; do
if ! grep $option $backup_dir/backup-my.cnf ; then
vlog "Option $option is absent"
exit -1
else
vlog "Option $option is present"
fi
done
| janlindstrom/percona-xtrabackup | storage/innobase/xtrabackup/test/t/bug983685.sh | Shell | gpl-2.0 | 666 |
#!/bin/bash
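# Export the ARM cross-compilation environment (androideabi toolchain prefix)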
export ARCH=arm
export CROSS_COMPILE=arm-linux-androideabi-
| jstotero/Old_Cucciolone | setenv.sh | Shell | gpl-2.0 | 72 |
#!/bin/bash
#
##################################################################################################################
#
# DO NOT JUST RUN THIS. EXAMINE AND JUDGE. AT YOUR OWN RISK.
#
##################################################################################################################
conky -c ~/.config/i3/system-overview
conky -c ~/.config/i3/system-shortcuts
| erikdubois/Archi3 | start-conkys.sh | Shell | gpl-2.0 | 388 |
#!/bin/bash
# (c) 2014-2015 Sam Nazarko
# [email protected]
. ../common.sh
# Build in native environment
if [ $1 == "rbp1" ]; then pull_source "https://github.com/bavison/arm-mem/archive/cd2c8f9202137c79f7afb77ecb87e713a0800d3c.zip" "$(pwd)/src"; fi
if [ $1 == "rbp2" ]; then pull_source "https://github.com/bavison/arm-mem/archive/master.zip" "$(pwd)/src"; fi
build_in_env "${1}" $(pwd) "rbp-armmem-osmc"
if [ $? == 0 ]
then
echo -e "Building package rbp-armmem"
out=$(pwd)/files
sed '/Package/d' -i files/DEBIAN/control
echo "Package: ${1}-armmem-osmc" >> files/DEBIAN/control
make clean
pushd src/arm-mem-*
make
if [ $? != 0 ]; then echo "Error occured during build" && exit 1; fi
strip_libs
mkdir -p $out/usr/lib
cp -ar libarmmem.so $out/usr/lib
cp -ar libarmmem.a $out/usr/lib
popd
fix_arch_ctl "files/DEBIAN/control"
dpkg -b files/ rbp-armmem-osmc.deb
fi
teardown_env "${1}"
| ActionAdam/osmc | package/rbp-armmem-osmc/build.sh | Shell | gpl-2.0 | 909 |
function named()
{
case "$1" in
"yes")
$YUM_CMD bind bind-utils 2>&1 | tee -a $LOG
# if xs data files are installed before bind, named user doesn't exist,
# and group ownership is set to root, which user named cannot read
if [ -d /var/named-xs ]; then
chown -R named /var/named-xs
fi
systemctl enable named.service 2>&1 | tee -a $LOG
systemctl restart named.service 2>&1 | tee -a $LOG
touch $SETUPSTATEDIR/named
;;
"no")
systemctl disable named.service 2>&1 | tee -a $LOG
systemctl stop named.service 2>&1 | tee -a $LOG
# let the dhclient control the name resolution normally
#rm /etc/sysconfig/olpc-scripts/resolv.conf
rm $SETUPSTATEDIR/named
;;
esac
}
| tim-moody/xsce | deprecated/plugins.d/named/named.sh | Shell | gpl-2.0 | 778 |
#! /bin/sh
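# Extract translatable strings from the C++ sources into the imagerename_plugin message catalog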
$XGETTEXT *.cpp -o $podir/imagerename_plugin.pot
| kolab-groupware/kde-runtime | renamedlgplugins/images/Messages.sh | Shell | gpl-2.0 | 61 |
#!/bin/sh
# --- T2-COPYRIGHT-NOTE-BEGIN ---
# This copyright note is auto-generated by ./scripts/Create-CopyPatch.
#
# T2 SDE: package/.../xorg-server/xvfb-run.sh
# Copyright (C) 2005 The T2 SDE Project
# Copyright (C) XXXX - 2005 Debian
#
# More information can be found in the files COPYING and README.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License. A copy of the
# GNU General Public License can be found in the file COPYING.
# --- T2-COPYRIGHT-NOTE-END ---
# $Id: xvfb-run 2166 2005-01-27 07:54:19Z branden $
# from: http://necrotic.deadbeast.net/xsf/XFree86/trunk/debian/local/xvfb-run
# This script starts an instance of Xvfb, the "fake" X server, runs a command
# with that server available, and kills the X server when done. The return
# value of the command becomes the return value of this script.
#
# If anyone is using this to build a Debian package, make sure the package
# Build-Depends on xvfb, xbase-clients, and xfonts-base.
set -e
PROGNAME=xvfb-run
SERVERNUM=99
AUTHFILE=
ERRORFILE=/dev/null
STARTWAIT=3
XVFBARGS="-screen 0 640x480x8"
LISTENTCP="-nolisten tcp"
XAUTHPROTO=.
# Query the terminal to establish a default number of columns to use for
# displaying messages to the user. This is used only as a fallback in the event
# the COLUMNS variable is not set. ($COLUMNS can react to SIGWINCH while the
# script is running, and this cannot, only being calculated once.)
DEFCOLUMNS=$(stty size 2>/dev/null | awk '{print $2}') || true
if ! expr "$DEFCOLUMNS" : "[[:digit:]]\+$" >/dev/null 2>&1; then
DEFCOLUMNS=80
fi
# Display a message, wrapping lines at the terminal width.
message () {
echo "$PROGNAME: $*" | fmt -t -w ${COLUMNS:-$DEFCOLUMNS}
}
# Display an error message.
error () {
message "error: $*" >&2
}
# Display a usage message.
usage () {
if [ -n "$*" ]; then
message "usage error: $*"
fi
cat <<EOF
Usage: $PROGNAME [OPTION ...] COMMAND
Run COMMAND (usually an X client) in a virtual X server environment.
Options:
-a --auto-servernum try to get a free server number, starting at
--server-num
-e FILE --error-file=FILE file used to store xauth errors and Xvfb
output (default: $ERRORFILE)
-f FILE --auth-file=FILE file used to store auth cookie
(default: ./.Xauthority)
-h --help display this usage message and exit
-n NUM --server-num=NUM server number to use (default: $SERVERNUM)
-l --listen-tcp enable TCP port listening in the X server
-p PROTO --xauth-protocol=PROTO X authority protocol name to use
(default: xauth command's default)
-s ARGS --server-args=ARGS arguments (other than server number and
"-nolisten tcp") to pass to the Xvfb server
(default: "$XVFBARGS")
-w DELAY --wait=DELAY delay in seconds to wait for Xvfb to start
before running COMMAND (default: $STARTWAIT)
EOF
}
# Find a free server number by looking at .X*-lock files in /tmp.
find_free_servernum() {
# Sadly, the "local" keyword is not POSIX. Leave the next line commented in
# the hope Debian Policy eventually changes to allow it in /bin/sh scripts
# anyway.
#local i
i=$SERVERNUM
while [ -f /tmp/.X$i-lock ]; do
i=$(($i + 1))
done
echo $i
}
# Parse the command line.
ARGS=$(getopt --options +ae:f:hn:lp:s:w: \
--long auto-servernum,error-file:,auth-file:,help,server-num:,listen-tcp,xauth-protocol:,server-args:,wait: \
--name "$PROGNAME" -- "$@")
GETOPT_STATUS=$?
if [ $GETOPT_STATUS -ne 0 ]; then
error "internal error; getopt exited with status $GETOPT_STATUS"
exit 6
fi
eval set -- "$ARGS"
while :; do
case "$1" in
-a|--auto-servernum) SERVERNUM=$(find_free_servernum) ;;
-e|--error-file) ERRORFILE="$2"; shift ;;
-f|--auth-file) AUTHFILE="$2"; shift ;;
-h|--help) SHOWHELP="yes" ;;
-n|--server-num) SERVERNUM="$2"; shift ;;
-l|--listen-tcp) LISTENTCP="" ;;
-p|--xauth-protocol) XAUTHPROTO="$2"; shift ;;
-s|--server-args) XVFBARGS="$2"; shift ;;
-w|--wait) STARTWAIT="$2"; shift ;;
--) shift; break ;;
*) error "internal error; getopt permitted \"$1\" unexpectedly"
exit 6
;;
esac
shift
done
if [ "$SHOWHELP" ]; then
usage
exit 0
fi
if [ -z "$*" ]; then
usage "need a command to run" >&2
exit 2
fi
if ! which xauth >/dev/null; then
error "xauth command not found"
exit 3
fi
# If the user did not specify an X authorization file to use, set up a temporary
# directory to house one.
if [ -z "$AUTHFILE" ]; then
XVFB_RUN_TMPDIR="${TMPDIR:-/tmp}/$PROGNAME.$$"
if ! mkdir -p -m 700 "$XVFB_RUN_TMPDIR"; then
error "temporary directory $XVFB_RUN_TMPDIR already exists"
exit 4
fi
AUTHFILE=$(mktemp -p "$XVFB_RUN_TMPDIR" Xauthority.XXXXXX)
fi
# Start Xvfb.
MCOOKIE=$(mcookie)
XAUTHORITY=$AUTHFILE xauth add ":$SERVERNUM" "$XAUTHPROTO" "$MCOOKIE" \
>"$ERRORFILE" 2>&1
XAUTHORITY=$AUTHFILE Xvfb ":$SERVERNUM" $XVFBARGS $LISTENTCP >"$ERRORFILE" \
2>&1 &
XVFBPID=$!
sleep "$STARTWAIT"
# Start the command and save its exit status.
set +e
DISPLAY=:$SERVERNUM XAUTHORITY=$AUTHFILE "$@" 2>&1
RETVAL=$?
set -e
# Kill Xvfb now that the command has exited.
kill $XVFBPID
# Clean up.
XAUTHORITY=$AUTHFILE xauth remove ":$SERVERNUM" >"$ERRORFILE" 2>&1
if [ -n "$XVFB_RUN_TMPDIR" ]; then
if ! rm -r "$XVFB_RUN_TMPDIR"; then
error "problem while cleaning up temporary directory"
exit 5
fi
fi
# Return the executed command's exit status.
exit $RETVAL
# vim:set ai et sts=4 sw=4 tw=80:
| picklingjar/Webkitd | scripts/xvfb-run.sh | Shell | gpl-3.0 | 6,031 |
#!/bin/sh
(cd ../.. ; ./run.sh Recom2 1 v-naif constant 10 datasets/renault_medium 10 -e -s 10 | tee experiments/exp14/vnaif-10-medium-10s)
(cd ../.. ; ./run.sh Recom2 1 v-naif constant 20 datasets/renault_medium 10 -e -s 10 | tee experiments/exp14/vnaif-20-medium-10s)
(cd ../.. ; ./run.sh Recom2 1 v-naif constant 50 datasets/renault_medium 10 -e -s 10 | tee experiments/exp14/vnaif-50-medium-10s)
(cd ../.. ; ./run.sh Recom2 1 v-naif constant 200 datasets/renault_medium 10 -e -s 10 | tee experiments/exp14/vnaif-200-medium-10s)
(cd ../.. ; ./run.sh Recom2 1 v-naif constant 2000 datasets/renault_medium 10 -e -s 10 | tee experiments/exp14/vnaif-2000-medium-10s)
(cd ../.. ; ./run.sh Recom2 1 v-naif inverse 1000 datasets/renault_medium 10 -e -s 10 | tee experiments/exp14/vnaif-inverse-1000-medium-10s)
(cd ../.. ; ./run.sh Recom2 1 v-naif inverse 750 datasets/renault_medium 10 -e -s 10 | tee experiments/exp14/vnaif-inverse-750-medium-10s)
(cd ../.. ; ./run.sh Recom2 1 jointree hc datasets/renault_medium 10 -e -s 10| tee experiments/exp14/jointree-medium-10s)
notify-send 'An experiment just completed'
| PFgimenez/thesis | experiments/exp14/run-nb-neighbours-medium.sh | Shell | gpl-3.0 | 1,113 |
#!/bin/sh
# GNSS-SDR shell script that enables the remote GNSS-SDR restart telecommand
# usage: ./gnss-sdr-harness.sh ./gnss-sdr -c config_file.conf
# SPDX-FileCopyrightText: Javier Arribas <javier.arribas(at)cttc.es>
# SPDX-License-Identifier: GPL-3.0-or-later
echo "$@"
"$@"
while [ $? -eq 42 ]
do
echo "restarting GNSS-SDR..."
"$@"
done
| gnss-sdr/gnss-sdr | src/utils/scripts/gnss-sdr-harness.sh | Shell | gpl-3.0 | 338 |
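# Rewrite the defaultOS and defaultWordsize settings in the given file using the current $OS and $WORDSIZE environment values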
sed 's/defaultOS = .*,/defaultOS = '$OS',/g' $1 | \
sed 's/defaultWordsize = .*,/defaultWordsize = '$WORDSIZE',/g'
| Pastor/fcc | makedefaults.sh | Shell | gpl-3.0 | 114 |
#!/bin/bash
#-------------------------------------------------------
# GET INTERFACE TYPE 6
# generator for xmp_coarray_get.h
# see also ../src/xmpf_coarray_get_wrap.f90{,.sh}
#-------------------------------------------------------
#--------------------
# sub
#--------------------
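# echo72: pad/truncate the line to 72 columns and append '&' so the generated Fortran declaration continues on the next line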
echo72 () {
str="$1 "
str=`echo "$str" | cut -c -72`"&"
echo "$str"
}
print_function() {
tk=$1
typekind=$2
echo72 " function xmpf_coarray_get${DIM}d_${tk}(descptr, baseaddr, element,"
echo72 " & coindex, mold, rank"
for i in `seq 1 ${DIM}`; do
echo72 " & , nextaddr${i}, count${i}"
done
echo ' & ) result(val)'
echo ' integer(8), intent(in) :: descptr'
echo ' integer, intent(in) :: element, coindex, rank'
for i in `seq 1 ${DIM}`; do
echo " integer, intent(in) :: count${i}"
done
echo ' integer(8), intent(in) :: baseaddr'
for i in `seq 1 ${DIM}`; do
echo " integer(8), intent(in) :: nextaddr${i}"
done
case ${DIM} in
0) echo " ${typekind} :: mold" ;;
1) echo " ${typekind} :: mold(count1)" ;;
*) echo72 " ${typekind} ::"
echo -n " & mold(count1"
for i in `seq 2 ${DIM}`; do
echo -n ",count${i}"
done
echo ')' ;;
esac
case ${DIM} in
0) echo " ${typekind} :: val" ;;
1) echo " ${typekind} :: val(count1)" ;;
*) echo72 " ${typekind} ::"
echo -n " & val(count1"
for i in `seq 2 ${DIM}`; do
echo -n ",count${i}"
done
echo ')' ;;
esac
echo " end function"
}
#--------------------
# main
#--------------------
echo "!! This file is automatically generated by $0"
echo
TARGET=$1
for DIM in `seq 0 7`
do
echo "!-------------------------------------------------------"
echo " interface xmpf_coarray_get${DIM}d"
echo "!-------------------------------------------------------"
echo "!!! real(kind=16) is not supported in XMP/F"
echo "!!! complex(kind=16) (32bytes) is not supported in XMP/F"
echo
if test "sxace-nec-superux" != "$TARGET"; then ## integer(2) cannot be used on SX-ACE
print_function i2 "integer(2)"
fi
print_function i4 "integer(4)"
print_function i8 "integer(8)"
if test "sxace-nec-superux" != "$TARGET"; then ## logical(2) cannot be used on SX-ACE
print_function l2 "logical(2)"
fi
print_function l4 "logical(4)"
print_function l8 "logical(8)"
print_function r4 "real(4)"
print_function r8 "real(8)"
print_function z8 "complex(4)"
print_function z16 "complex(8)"
print_function cn "character(element)"
echo
echo " end interface"
echo
done
exit
|
omni-compiler/omni-compiler
|
libxmpf/include/xmp_coarray_get.h-Type6.sh
|
Shell
|
lgpl-3.0
| 2,995 |
#! /bin/bash
# verify the accept order is observed
curl_sparql_request \
-H 'Accept: application/sparql-results+json,application/sparql-results+xml,*/*;q=0.9' \
'query=select%20count(*)%20where%20%7b?s%20?p%20?o%7d' \
| jq '.results.bindings[] | .[].value' | fgrep -q '"1"'
|
dydra/http-api-tests
|
sparql-protocol/GET-count-srj+srx.sh
|
Shell
|
unlicense
| 289 |
#!/bin/bash
# Copyright 2017 The OpenEBS Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#./ci/helm_install_openebs.sh
# global env vars to be used in test scripts
export CI_BRANCH="master"
export CI_TAG="ci"
export MAYACTL="$GOPATH/src/github.com/openebs/maya/bin/maya/mayactl"
./ci/build-maya.sh
rc=$?; if [[ $rc != 0 ]]; then exit $rc; fi
curl https://raw.githubusercontent.com/openebs/openebs/master/k8s/ci/test-script.sh > test-script.sh
# append mayactl tests to this script
cat ./ci/mayactl.sh >> ./test-script.sh
# append local pv tests to this script
#cat ./ci/local_pv.sh >> ./test-script.sh
chmod +x test-script.sh && ./test-script.sh
rc=$?; if [[ $rc != 0 ]]; then exit $rc; fi
|
AmitKumarDas/maya
|
ci/travis-ci.sh
|
Shell
|
apache-2.0
| 1,206 |
#!/bin/bash
# BASH function that kill and remove the running containers
function stop()
{
P1=$(docker ps -q)
if [ "${P1}" != "" ]; then
echo "Killing all running containers" &2> /dev/null
docker kill ${P1}
fi
P2=$(docker ps -aq)
if [ "${P2}" != "" ]; then
echo "Removing all containers" &2> /dev/null
docker rm ${P2} -f
fi
}
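# Sketch of what stop() amounts to when containers are present (comment only,
# not executed; equivalent manual commands for illustration):
#   docker kill $(docker ps -q)
#   docker rm $(docker ps -aq) -f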
# Function to remove the images as well
function remove()
{
P=$(docker images -q)
if [ "${P}" != "" ]; then
echo "Removing images" 2> /dev/null
docker rmi ${P} -f
fi
}
echo "For all Docker containers or images (not just Hyperledger Fabric and Composer)"
echo "1 - Kill and remove only the containers"
echo "2 - Kill and remove the containers and remove all the downloaded images"
echo "3 - Quit and not do anything"
echo
PS3="Please select which option > "
options=("Kill & Remove" "Remove Images" "Quit")
select yn in "${options[@]}"; do
case $yn in
"Kill & Remove" ) stop; break;;
"Remove Images" ) stop; remove; break;;
"Quit" ) exit;;
esac
done
|
xchenibm/forktest
|
teardownAllDocker.sh
|
Shell
|
apache-2.0
| 1,031 |
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
export EXTRA_LIBS="${NACL_CLI_MAIN_LIB}"
EnableGlibcCompat
NACLPORTS_CPPFLAGS+=" -DGNULIB_defined_struct_sigaction -Dpipe=nacl_spawn_pipe"
PatchStep() {
DefaultPatchStep
# Touch documentation to prevent it from updating.
touch ${SRC_DIR}/doc/*
}
|
GoogleChromeLabs/chromeos_smart_card_connector
|
third_party/webports/src/src/ports/m4/build.sh
|
Shell
|
apache-2.0
| 427 |
#!/bin/bash
! golint ./... | egrep -v $(cat .excludelint | sed -e 's/^/ -e /' | sed -e 's/(//' | sed -e 's/)//' | tr -d '\n')
|
m3db/m3storage
|
.ci/lint.sh
|
Shell
|
apache-2.0
| 128 |
#!/bin/bash
# Copyright 2018 The Tekton Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a collection of useful bash functions and constants, intended
# to be used in test scripts and the like. It doesn't do anything when
# called from command line.
# Default GKE version to be used with Tekton Serving
readonly SERVING_GKE_VERSION=gke-channel-regular
readonly SERVING_GKE_IMAGE=cos
# Conveniently set GOPATH if unset
if [[ -z "${GOPATH:-}" ]]; then
export GOPATH="$(go env GOPATH)"
if [[ -z "${GOPATH}" ]]; then
echo "WARNING: GOPATH not set and go binary unable to provide it"
fi
fi
# Useful environment variables
[[ -n "${PROW_JOB_ID:-}" ]] && IS_PROW=1 || IS_PROW=0
readonly IS_PROW
readonly REPO_ROOT_DIR="${REPO_ROOT_DIR:-$(git rev-parse --show-toplevel 2> /dev/null)}"
readonly REPO_NAME="${REPO_NAME:-$(basename ${REPO_ROOT_DIR} 2> /dev/null)}"
# Set ARTIFACTS to an empty temp dir if unset
if [[ -z "${ARTIFACTS:-}" ]]; then
export ARTIFACTS="$(mktemp -d)"
fi
# On a Prow job, redirect stderr to stdout so it's synchronously added to log
(( IS_PROW )) && exec 2>&1
# Print error message and exit 1
# Parameters: $1..$n - error message to be displayed
function abort() {
echo "error: $@"
exit 1
}
# Display a box banner.
# Parameters: $1 - character to use for the box.
# $2 - banner message.
function make_banner() {
local msg="$1$1$1$1 $2 $1$1$1$1"
local border="${msg//[-0-9A-Za-z _.,\/()]/$1}"
echo -e "${border}\n${msg}\n${border}"
}
# Simple header for logging purposes.
function header() {
local upper="$(echo $1 | tr a-z A-Z)"
make_banner "=" "${upper}"
}
# Simple subheader for logging purposes.
function subheader() {
make_banner "-" "$1"
}
# Simple warning banner for logging purposes.
function warning() {
make_banner "!" "$1"
}
# Checks whether the given function exists.
function function_exists() {
[[ "$(type -t $1)" == "function" ]]
}
# Waits until the given object doesn't exist.
# Parameters: $1 - the kind of the object.
# $2 - object's name.
# $3 - namespace (optional).
function wait_until_object_does_not_exist() {
local KUBECTL_ARGS="get $1 $2"
local DESCRIPTION="$1 $2"
if [[ -n $3 ]]; then
KUBECTL_ARGS="get -n $3 $1 $2"
DESCRIPTION="$1 $3/$2"
fi
echo -n "Waiting until ${DESCRIPTION} does not exist"
for i in {1..150}; do # timeout after 5 minutes
if ! kubectl ${KUBECTL_ARGS} > /dev/null 2>&1; then
echo -e "\n${DESCRIPTION} does not exist"
return 0
fi
echo -n "."
sleep 2
done
echo -e "\n\nERROR: timeout waiting for ${DESCRIPTION} not to exist"
kubectl ${KUBECTL_ARGS}
return 1
}
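# Minimal usage sketch (object and namespace names are illustrative only):
#   wait_until_object_does_not_exist namespace test-ns
#   wait_until_object_does_not_exist pod my-pod test-ns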
# Waits until all pods are running in the given namespace.
# Parameters: $1 - namespace.
function wait_until_pods_running() {
echo -n "Waiting until all pods in namespace $1 are up"
for i in {1..150}; do # timeout after 5 minutes
local pods="$(kubectl get pods --no-headers -n $1 2>/dev/null)"
# All pods must be running
local not_running=$(echo "${pods}" | grep -v Running | grep -v Completed | wc -l)
if [[ -n "${pods}" && ${not_running} -eq 0 ]]; then
local all_ready=1
while read pod ; do
local status=(`echo -n ${pod} | cut -f2 -d' ' | tr '/' ' '`)
# All containers must be ready
[[ -z ${status[0]} ]] && all_ready=0 && break
[[ -z ${status[1]} ]] && all_ready=0 && break
[[ ${status[0]} -lt 1 ]] && all_ready=0 && break
[[ ${status[1]} -lt 1 ]] && all_ready=0 && break
[[ ${status[0]} -ne ${status[1]} ]] && all_ready=0 && break
done <<< $(echo "${pods}" | grep -v Completed)
if (( all_ready )); then
echo -e "\nAll pods are up:\n${pods}"
return 0
fi
fi
echo -n "."
sleep 2
done
echo -e "\n\nERROR: timeout waiting for pods to come up\n${pods}"
return 1
}
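# Minimal usage sketch (namespace name is illustrative only):
#   wait_until_pods_running tekton-pipelines || abort "pods did not come up"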
# Waits until all batch jobs complete in the given namespace.
# Parameters: $1 - namespace.
function wait_until_batch_job_complete() {
echo -n "Waiting until all batch jobs in namespace $1 run to completion."
for i in {1..150}; do # timeout after 5 minutes
local jobs=$(kubectl get jobs -n $1 --no-headers \
-ocustom-columns='n:{.metadata.name},c:{.spec.completions},s:{.status.succeeded}')
# All jobs must be complete
local not_complete=$(echo "${jobs}" | awk '{if ($2!=$3) print $0}' | wc -l)
if [[ ${not_complete} -eq 0 ]]; then
echo -e "\nAll jobs are complete:\n${jobs}"
return 0
fi
echo -n "."
sleep 2
done
echo -e "\n\nERROR: timeout waiting for jobs to complete\n${jobs}"
return 1
}
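# Minimal usage sketch (namespace name is illustrative only):
#   wait_until_batch_job_complete test-ns || abort "jobs did not complete"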
# Waits until the given service has an external address (IP/hostname).
# Parameters: $1 - namespace.
# $2 - service name.
function wait_until_service_has_external_ip() {
echo -n "Waiting until service $2 in namespace $1 has an external address (IP/hostname)"
for i in {1..150}; do # timeout after 15 minutes
local ip=$(kubectl get svc -n $1 $2 -o jsonpath="{.status.loadBalancer.ingress[0].ip}")
if [[ -n "${ip}" ]]; then
echo -e "\nService $2.$1 has IP $ip"
return 0
fi
local hostname=$(kubectl get svc -n $1 $2 -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
if [[ -n "${hostname}" ]]; then
echo -e "\nService $2.$1 has hostname $hostname"
return 0
fi
echo -n "."
sleep 6
done
echo -e "\n\nERROR: timeout waiting for service $2.$1 to have an external address"
kubectl get pods -n $1
return 1
}
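# Minimal usage sketch (namespace and service names are illustrative only):
#   wait_until_service_has_external_ip istio-system istio-ingressgateway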
# Waits for the endpoint to be routable.
# Parameters: $1 - External ingress IP address.
# $2 - cluster hostname.
function wait_until_routable() {
echo -n "Waiting until cluster $2 at $1 has a routable endpoint"
for i in {1..150}; do # timeout after 5 minutes
local val=$(curl -H "Host: $2" "http://$1" 2>/dev/null)
if [[ -n "$val" ]]; then
echo -e "\nEndpoint is now routable"
return 0
fi
echo -n "."
sleep 2
done
echo -e "\n\nERROR: Timed out waiting for endpoint to be routable"
return 1
}
# Returns the name of the first pod of the given app.
# Parameters: $1 - app name.
# $2 - namespace (optional).
function get_app_pod() {
local pods=($(get_app_pods $1 $2))
echo "${pods[0]}"
}
# Returns the name of all pods of the given app.
# Parameters: $1 - app name.
# $2 - namespace (optional).
function get_app_pods() {
local namespace=""
[[ -n $2 ]] && namespace="-n $2"
kubectl get pods ${namespace} --selector=app=$1 --output=jsonpath="{.items[*].metadata.name}"
}
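# Minimal usage sketch (app label and namespace are illustrative only):
#   controller_pod="$(get_app_pod tekton-pipelines-controller tekton-pipelines)"
#   kubectl -n tekton-pipelines logs "${controller_pod}"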
# Capitalize the first letter of each word.
# Parameters: $1..$n - words to capitalize.
function capitalize() {
local capitalized=()
for word in $@; do
local initial="$(echo ${word:0:1}| tr 'a-z' 'A-Z')"
capitalized+=("${initial}${word:1}")
done
echo "${capitalized[@]}"
}
# Dumps pod logs for the given app.
# Parameters: $1 - app name.
# $2 - namespace.
function dump_app_logs() {
echo ">>> ${REPO_NAME_FORMATTED} $1 logs:"
for pod in $(get_app_pods "$1" "$2")
do
echo ">>> Pod: $pod"
kubectl -n "$2" logs "$pod" -c "$1"
done
}
# Runs a go test and generate a junit summary.
# Parameters: $1... - parameters to go test
function report_go_test() {
# Run tests in verbose mode to capture details.
# go doesn't like repeating -v, so remove if passed.
local args=" $@ "
local go_test="go test -race -v ${args/ -v / }"
# Just run regular go tests if not on Prow.
echo "Running tests with '${go_test}'"
local report=$(mktemp)
${go_test} | tee ${report}
local failed=( ${PIPESTATUS[@]} )
[[ ${failed[0]} -eq 0 ]] && failed=${failed[1]} || failed=${failed[0]}
echo "Finished run, return code is ${failed}"
# Install go-junit-report if necessary.
run_go_tool github.com/jstemmer/go-junit-report go-junit-report --help > /dev/null 2>&1
local xml=$(mktemp ${ARTIFACTS}/junit_XXXXXXXX.xml)
cat "${report}" \
| go-junit-report \
| sed -e "s#\"github.com/tektoncd/${REPO_NAME}/#\"#g" \
> ${xml}
echo "XML report written to ${xml}"
if (( ! IS_PROW )); then
# Keep the suffix, so files are related.
local logfile=${xml/junit_/go_test_}
logfile=${logfile/.xml/.log}
cp ${report} ${logfile}
echo "Test log written to ${logfile}"
fi
return ${failed}
}
# Run a go tool, installing it first if necessary.
# Parameters: $1 - tool package/dir for go get/install.
# $2 - tool to run.
# $3..$n - parameters passed to the tool.
function run_go_tool() {
local tool=$2
if [[ -z "$(which ${tool})" ]]; then
local action=get
[[ $1 =~ ^[\./].* ]] && action=install
go ${action} $1
fi
shift 2
${tool} "$@"
}
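# Minimal usage sketch: install a tool if missing, then run it. The goimports
# package path below is only an illustration, not a call site in this library:
#   run_go_tool golang.org/x/tools/cmd/goimports goimports -w ./pkg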
# Update licenses.
# Parameters: $1 - output file, relative to repo root dir.
# $2...$n - directories and files to inspect.
function update_licenses() {
cd ${REPO_ROOT_DIR} || return 1
local dst=$1
shift
go-licenses save ./... --save_path=${dst} --force
# Hack to make sure directories retain write permissions after save. This
# can happen if the directory being copied is a Go module.
# See https://github.com/google/go-licenses/issues/11
chmod +w $(find ${dst} -type d)
}
# Check for forbidden licenses.
# Parameters: $1...$n - directories and files to inspect.
function check_licenses() {
go-licenses check ./...
}
# Run the given linter on the given files, checking it exists first.
# Parameters: $1 - tool
# $2 - tool purpose (for error message if tool not installed)
# $3 - tool parameters (quote if multiple parameters used)
# $4..$n - files to run linter on
function run_lint_tool() {
local checker=$1
local params=$3
if ! hash ${checker} 2>/dev/null; then
warning "${checker} not installed, not $2"
return 127
fi
shift 3
local failed=0
for file in $@; do
${checker} ${params} ${file} || failed=1
done
return ${failed}
}
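# Minimal usage sketch (file arguments are illustrative; see lint_markdown
# below for the real call site with the repo's markdown-lint config):
#   run_lint_tool mdl "linting markdown files" "-c ${config}" README.md docs/*.md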
# Check links in the given markdown files.
# Parameters: $1...$n - files to inspect
function check_links_in_markdown() {
# https://github.com/raviqqe/liche
local config="${REPO_ROOT_DIR}/test/markdown-link-check-config.rc"
[[ ! -e ${config} ]] && config="${_PLUMBING_SCRIPTS_DIR}/markdown-link-check-config.rc"
local options="$(grep '^-' ${config} | tr \"\n\" ' ')"
run_lint_tool liche "checking links in markdown files" "-d ${REPO_ROOT_DIR} ${options}" $@
}
# Check format of the given markdown files.
# Parameters: $1..$n - files to inspect
function lint_markdown() {
# https://github.com/markdownlint/markdownlint
local config="${REPO_ROOT_DIR}/test/markdown-lint-config.rc"
[[ ! -e ${config} ]] && config="${_PLUMBING_SCRIPTS_DIR}/markdown-lint-config.rc"
run_lint_tool mdl "linting markdown files" "-c ${config}" $@
}
# Return whether the given parameter is an integer.
# Parameters: $1 - integer to check
function is_int() {
[[ -n $1 && $1 =~ ^[0-9]+$ ]]
}
# Return whether the given parameter is the tekton release/nightly GCF.
# Parameters: $1 - full GCR name, e.g. gcr.io/tekton-foo-bar
function is_protected_gcr() {
[[ -n $1 && "$1" =~ "^gcr.io/tekton-(releases|nightly)/?$" ]]
}
# Remove symlinks in a path that are broken or lead outside the repo.
# Parameters: $1 - path name, e.g. vendor
function remove_broken_symlinks() {
for link in $(find $1 -type l); do
# Remove broken symlinks
if [[ ! -e ${link} ]]; then
unlink ${link}
continue
fi
# Get canonical path to target, remove if outside the repo
local target="$(ls -l ${link})"
target="${target##* -> }"
[[ ${target} == /* ]] || target="./${target}"
target="$(cd `dirname ${link}` && cd ${target%/*} && echo $PWD/${target##*/})"
if [[ ${target} != *github.com/tektoncd/* ]]; then
unlink ${link}
continue
fi
done
}
# Return whether the given parameter is tekton-tests.
# Parameters: $1 - project name
function is_protected_project() {
[[ -n "$1" && "$1" == "tekton-tests" ]]
}
# Returns the canonical path of a filesystem object.
# Parameters: $1 - path to return in canonical form
# $2 - base dir for relative links; optional, defaults to current
function get_canonical_path() {
# We don't use readlink because it's not available on every platform.
local path=$1
local pwd=${2:-.}
[[ ${path} == /* ]] || path="${pwd}/${path}"
echo "$(cd ${path%/*} && echo $PWD/${path##*/})"
}
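# Minimal usage sketch (paths are illustrative only):
#   get_canonical_path ../scripts/library.sh "${REPO_ROOT_DIR}"
# prints an absolute path with "." and ".." components resolved.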
# Initializations that depend on previous functions.
# These MUST come last.
readonly _PLUMBING_SCRIPTS_DIR="$(dirname $(get_canonical_path ${BASH_SOURCE[0]}))"
readonly REPO_NAME_FORMATTED="Tekton $(capitalize ${REPO_NAME//-/})"
# Helper functions to run YAML tests
# Taken from tektoncd/pipeline test/e2e-common.sh
function validate_run() {
local tests_finished=0
for i in {1..60}; do
local finished="$(kubectl get $1.tekton.dev --output=jsonpath='{.items[*].status.conditions[*].status}')"
if [[ ! "$finished" == *"Unknown"* ]]; then
tests_finished=1
break
fi
sleep 10
done
return ${tests_finished}
}
function check_results() {
local failed=0
results="$(kubectl get $1.tekton.dev --output=jsonpath='{range .items[*]}{.metadata.name}={.status.conditions[*].type}{.status.conditions[*].status}{" "}{end}')"
for result in ${results}; do
if [[ ! "${result,,}" == *"=succeededtrue" ]]; then
echo "ERROR: test ${result} but should be succeededtrue"
failed=1
fi
done
return ${failed}
}
function create_resources() {
local resource=$1
echo ">> Creating resources ${resource}"
  # Applying the resources, either *taskruns or *pipelineruns
for file in $(find ${REPO_ROOT_DIR}/examples/${resource}s/ -name *.yaml | sort); do
perl -p -e 's/gcr.io\/christiewilson-catfactory/$ENV{KO_DOCKER_REPO}/g' ${file} | ko apply -f - || return 1
done
}
function run_tests() {
local resource=$1
# Wait for tests to finish.
echo ">> Waiting for tests to finish for ${resource}"
if validate_run $resource; then
echo "ERROR: tests timed out"
fi
# Check that tests passed.
echo ">> Checking test results for ${resource}"
if check_results $resource; then
echo ">> All YAML tests passed"
return 0
fi
return 1
}
function run_yaml_tests() {
echo ">> Starting tests for the resource ${1}"
create_resources ${1}
if ! run_tests ${1}; then
return 1
fi
return 0
}
function output_yaml_test_results() {
# If formatting fails for any reason, use yaml as a fall back.
kubectl get $1.tekton.dev -o=custom-columns-file=${REPO_ROOT_DIR}/test/columns.txt || \
kubectl get $1.tekton.dev -oyaml
}
function output_pods_logs() {
echo ">>> $1"
kubectl get $1.tekton.dev -o yaml
local runs=$(kubectl get $1.tekton.dev --output=jsonpath="{.items[*].metadata.name}")
set +e
for run in ${runs}; do
echo ">>>> $1 ${run}"
case "$1" in
"taskrun")
tkn taskrun logs ${run}
;;
"pipelinerun")
tkn pipelinerun logs ${run}
;;
esac
done
set -e
echo ">>>> Pods"
kubectl get pods -o yaml
}
|
tektoncd/pipeline
|
vendor/github.com/tektoncd/plumbing/scripts/library.sh
|
Shell
|
apache-2.0
| 15,562 |
# Functions
usage() {
echo "Usage: $0 --key <openstack ssh key name> --instance-name <name> [--rhel7] [-n] [--auth-key-file <file location>]"
echo ""
}
get_fault_info() {
local instance_info=$1
echo "$instance_info" | grep -E "fault"
}
wait_for_instance_running() {
local instance_name=$1
command="[ \$( nova show $instance_name | grep -cE \"status.*ACTIVE|status.*ERROR\" ) -eq 1 ]"
run_cmd_with_timeout "$command" ${2:-30}
instance_info="$(nova show $instance_name)"
status=$( echo "$instance_info" | awk '/status/ {print $4}' )
[ "$status" == "ERROR" ] && safe_out "error" "Instance $instance_name failed to boot \n$(get_fault_info \"$instance_info\")" && exit 2
}
wait_for_ssh() {
local instance_ip=$1
command="ssh -o StrictHostKeyChecking=no cloud-user@${instance_ip} 'ls' &>/dev/null"
run_cmd_with_timeout "$command" ${2:-60}
}
run_cmd_with_timeout() {
local command="$1"
local timeout=$2
next_wait_time=0
until eval "$command" || [ $next_wait_time -eq $timeout ]; do
safe_out "info" "Ran command at $next_wait_time: $command"
sleep $(( next_wait_time++ ))
done
[ $next_wait_time -eq $timeout ] && safe_out "error" "Command $command timed out after $timeout seconds."
}
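# Minimal usage sketch (host address and timeout are illustrative only); this
# mirrors what wait_for_ssh does above:
#   run_cmd_with_timeout "ssh -o StrictHostKeyChecking=no cloud-user@192.0.2.10 'ls' &>/dev/null" 60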
safe_out() {
[ "$1" == "debug" ] && [ "${LOG_LEVEL}" == "debug" ] && echo "$1: $2" >> $LOGFILE
[ "$1" == "info" ] && ([ "${LOG_LEVEL}" == "info" ] || ["${LOG_LEVEL}" == "debug" ]) && echo "$1: $2" >> $LOGFILE
[ "$1" == "error" ] && echo "$1: $2" >> $LOGFILE
}
# Initialize environment
interactive="true"
LOGFILE=~/openstack_provision.log
LOG_LEVEL="info"
# Process options
while [[ $# -gt 0 ]] && [[ ."$1" = .--* ]] ;
do
opt=$1
shift
case "$opt" in
"--" ) break 2;;
"--key" )
key="$1"; shift;;
"--n")
unset interactive;;
"--instance-name")
instance_name="$1"; shift;;
"--rhel7")
image_name="rhel-guest-image-7.0-20140618.1";;
"--auth-key-file")
options="${options} --file /root/.ssh/authorized_keys=$1"; shift;;
"--debug")
LOG_LEVEL="debug";;
*) echo >&2 "Invalid option: $@"; exit 1;;
esac
done
if [ -z "$key" ] || [ -z "$instance_name" ]; then
echo "Missing required argument: both --key and --instance-name must be provided."
usage
exit 1;
fi
# Setup Environment and Gather Requirements
openstack_cred=${OPENSTACK_CRED_HOME:-~/.openstack/openrc.sh}
image_name_search=${image_name:-"rhel-guest-image-6.5-20140603.0"}
rc_file="${openstack_cred}"
#security_groups="default,osebroker,osenode"
security_groups="default"
flavor="m1.large"
#num_of_brokers=1
#num_of_nodes=1
if [ ! -f $rc_file ]; then
safe_out "error" "OpenStack API Credentials not found. Default location is ${rc_file}, or set OPENSTACK_CRED_HOME."
exit 1
fi
if [ -n "$security_groups" ]; then
options="${options} --security-groups ${security_groups}"
fi
. $rc_file
if [ "$interactive" = "true" ]; then
echo "Tail Logfile for More Info: ${LOGFILE}"
fi
# Provision VMs
image_ami=$(nova image-list | awk "/$image_name_search/"'{print $2}')
safe_out "debug" "nova boot --image ${image_ami} --flavor ${flavor} --key-name ${key} ${options} ${instance_name}"
status=$(nova boot --image ${image_ami} --flavor ${flavor} --key-name ${key} ${options} ${instance_name} | awk '/status/ {print $4}')
if [ "$status" != "BUILD" ]; then
echo "Something went wrong during image creation."
echo "Status expected: BUILD"
echo "Status received: $status"
exit 1
fi
safe_out "info" "Instance ${instance_name} created. Waiting for instance to start..."
# need to wait for instance to be in running state
wait_for_instance_running $instance_name
safe_out "info" "Instance ${instance_name} is active. Waiting for ssh service to be ready..."
instance_ip=$(nova show $instance_name | awk '/os1-internal.*network/ {print $6}')
# need to wait until ssh service comes up on instance
wait_for_ssh $instance_ip
safe_out "info" "Instance ${instance_name} is accessible and ready to use."
if [ "$interactive" = "true" ]; then
echo "Instance IP: ${instance_ip}"
else
echo "$instance_ip"
fi
|
redhat-consulting/ose-utils
|
installation/openstack/provision.sh
|
Shell
|
apache-2.0
| 4,015 |
#!/bin/bash
set -x
pushd /home/vagrant > /dev/null 2>&1
tar xvf /vagrant/rabbitmq-java-client-bin-3.4.0.tar.gz > /dev/null 2>&1
popd > /dev/null 2>&1
|
boundary/boundary-vagrant-rabbitmq
|
install-rabbitmq-client.sh
|
Shell
|
apache-2.0
| 154 |
# This script takes care of testing your crate
set -ex
# TODO This is the "test phase", tweak it as you see fit
main() {
cd rustual-boy-cli
cargo build
cargo build --release
    if [ ! -z "$DISABLE_TESTS" ]; then
return
fi
cargo test
cargo test --release
}
# we don't run the "test phase" when doing deploys
if [ -z "$TRAVIS_TAG" ]; then
main
fi
|
jwestfall69/rustual-boy
|
ci/script.sh
|
Shell
|
apache-2.0
| 384 |
#!/bin/bash
#
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Test rules provided in Bazel not tested by examples
#
set -u
ADDITIONAL_BUILD_FLAGS=$1
WORKER_TYPE_LOG_STRING=$2
WORKER_PROTOCOL=$3
shift 3
# Load the test setup defined in the parent directory
CURRENT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${CURRENT_DIR}/../integration_test_setup.sh" \
|| { echo "integration_test_setup.sh not found!" >&2; exit 1; }
# TODO(philwo): Change this so the path to the custom worker gets passed in as an argument to the
# test, once the bug that makes using the "args" attribute with sh_tests in Bazel impossible is
# fixed.
example_worker=$(find $BAZEL_RUNFILES -name ExampleWorker_deploy.jar)
add_to_bazelrc "build -s"
add_to_bazelrc "build --spawn_strategy=worker,standalone"
add_to_bazelrc "build --experimental_worker_allow_json_protocol"
add_to_bazelrc "build --worker_verbose --worker_max_instances=1"
add_to_bazelrc "build --debug_print_action_contexts"
add_to_bazelrc "build --noexperimental_worker_multiplex"
add_to_bazelrc "build ${ADDITIONAL_BUILD_FLAGS}"
function set_up() {
# Run each test in a separate folder so that their output files don't get cached.
WORKSPACE_SUBDIR=$(basename $(mktemp -d ${WORKSPACE_DIR}/testXXXXXX))
cd ${WORKSPACE_SUBDIR}
BINS=$(bazel info $PRODUCT_NAME-bin)/${WORKSPACE_SUBDIR}
# This causes Bazel to shut down all running workers.
bazel build --worker_quit_after_build &> $TEST_log \
|| fail "'bazel build --worker_quit_after_build' during test set_up failed"
}
function write_hello_library_files() {
mkdir -p java/main
cat >java/main/BUILD <<EOF
java_binary(name = 'main',
deps = [':hello_library'],
srcs = ['Main.java'],
main_class = 'main.Main')
java_library(name = 'hello_library',
srcs = ['HelloLibrary.java']);
EOF
cat >java/main/Main.java <<EOF
package main;
import main.HelloLibrary;
public class Main {
public static void main(String[] args) {
HelloLibrary.funcHelloLibrary();
System.out.println("Hello, World!");
}
}
EOF
cat >java/main/HelloLibrary.java <<EOF
package main;
public class HelloLibrary {
public static void funcHelloLibrary() {
System.out.print("Hello, Library!;");
}
}
EOF
}
function test_compiles_hello_library_using_persistent_javac() {
write_hello_library_files
bazel build java/main:main &> $TEST_log \
|| fail "build failed"
expect_log "Created new ${WORKER_TYPE_LOG_STRING} Javac worker (id [0-9]\+)"
$BINS/java/main/main | grep -q "Hello, Library!;Hello, World!" \
|| fail "comparison failed"
}
function prepare_example_worker() {
cp ${example_worker} worker_lib.jar
chmod +w worker_lib.jar
echo "exampledata" > worker_data.txt
mkdir worker_data_dir
echo "veryexample" > worker_data_dir/more_data.txt
cat >work.bzl <<EOF
def _impl(ctx):
worker = ctx.executable.worker
output = ctx.outputs.out
argfile_inputs = []
argfile_arguments = []
if ctx.attr.multiflagfiles:
# Generate one flagfile per command-line arg, alternate between @ and --flagfile= style.
# This is used to test the code that handles multiple flagfiles and the --flagfile= style.
idx = 1
for arg in ["--output_file=" + output.path] + ctx.attr.args:
argfile = ctx.actions.declare_file("%s_worker_input_%s" % (ctx.label.name, idx))
ctx.actions.write(output=argfile, content=arg)
argfile_inputs.append(argfile)
flagfile_prefix = "@" if (idx % 2 == 0) else "--flagfile="
argfile_arguments.append(flagfile_prefix + argfile.path)
idx += 1
else:
# Generate the "@"-file containing the command-line args for the unit of work.
argfile = ctx.actions.declare_file("%s_worker_input" % ctx.label.name)
argfile_contents = "\n".join(["--output_file=" + output.path] + ctx.attr.args)
ctx.actions.write(output=argfile, content=argfile_contents)
argfile_inputs.append(argfile)
argfile_arguments.append("@" + argfile.path)
execution_requirements = {"supports-workers": "1", "requires-worker-protocol": "$WORKER_PROTOCOL"}
if ctx.attr.worker_key_mnemonic:
execution_requirements["worker-key-mnemonic"] = ctx.attr.worker_key_mnemonic
ctx.actions.run(
inputs=argfile_inputs + ctx.files.srcs,
outputs=[output],
executable=worker,
progress_message="Working on %s" % ctx.label.name,
mnemonic=ctx.attr.action_mnemonic,
execution_requirements=execution_requirements,
arguments=ctx.attr.worker_args + argfile_arguments,
)
work = rule(
implementation=_impl,
attrs={
"worker": attr.label(cfg="exec", mandatory=True, allow_files=True, executable=True),
"worker_args": attr.string_list(),
"worker_key_mnemonic": attr.string(),
"action_mnemonic": attr.string(default = "Work"),
"args": attr.string_list(),
"srcs": attr.label_list(allow_files=True),
"multiflagfiles": attr.bool(default=False),
},
outputs = {"out": "%{name}.out"},
)
EOF
cat >BUILD <<EOF
load(":work.bzl", "work")
java_import(
name = "worker_lib",
jars = ["worker_lib.jar"],
)
java_binary(
name = "worker",
main_class = "com.google.devtools.build.lib.worker.ExampleWorker",
runtime_deps = [
":worker_lib",
],
data = [
":worker_data.txt",
":worker_data_dir",
]
)
EOF
}
function test_example_worker() {
prepare_example_worker
cat >>BUILD <<EOF
work(
name = "hello_world",
worker = ":worker",
worker_args = ["--worker_protocol=${WORKER_PROTOCOL}"],
args = ["hello world"],
)
work(
name = "hello_world_uppercase",
worker = ":worker",
worker_args = ["--worker_protocol=${WORKER_PROTOCOL}"],
args = ["--uppercase", "hello world"],
)
EOF
bazel build :hello_world &> $TEST_log \
|| fail "build failed"
assert_equals "hello world" "$(cat $BINS/hello_world.out)"
bazel build :hello_world_uppercase &> $TEST_log \
|| fail "build failed"
assert_equals "HELLO WORLD" "$(cat $BINS/hello_world_uppercase.out)"
}
function test_worker_requests() {
prepare_example_worker
cat >>BUILD <<EOF
work(
name = "hello_world",
worker = ":worker",
worker_args = ["--worker_protocol=${WORKER_PROTOCOL}"],
args = ["hello world", "--print_requests"],
)
work(
name = "hello_world_uppercase",
worker = ":worker",
worker_args = ["--worker_protocol=${WORKER_PROTOCOL}"],
args = ["--uppercase", "hello world", "--print_requests"],
)
EOF
bazel build :hello_world &> $TEST_log \
|| fail "build failed"
assert_contains "hello world" "$BINS/hello_world.out"
assert_contains "arguments: \"hello world\"" "$BINS/hello_world.out"
assert_contains "path:.*hello_world_worker_input" "$BINS/hello_world.out"
assert_not_contains "request_id" "$BINS/hello_world.out"
bazel build :hello_world_uppercase &> $TEST_log \
|| fail "build failed"
assert_contains "HELLO WORLD" "$BINS/hello_world_uppercase.out"
assert_contains "arguments: \"hello world\"" "$BINS/hello_world_uppercase.out"
assert_contains "path:.*hello_world_uppercase_worker_input" "$BINS/hello_world_uppercase.out"
assert_not_contains "request_id" "$BINS/hello_world_uppercase.out"
}
function test_shared_worker() {
prepare_example_worker
cat >>BUILD <<EOF
work(
name = "hello_world",
worker = ":worker",
worker_args = ["--worker_protocol=${WORKER_PROTOCOL}"],
action_mnemonic = "Hello",
worker_key_mnemonic = "SharedWorker",
args = ["--write_uuid"],
)
work(
name = "goodbye_world",
worker = ":worker",
worker_args = ["--worker_protocol=${WORKER_PROTOCOL}"],
action_mnemonic = "Goodbye",
worker_key_mnemonic = "SharedWorker",
args = ["--write_uuid"],
)
EOF
bazel build :hello_world :goodbye_world &> $TEST_log \
|| fail "build failed"
worker_uuid_1=$(cat $BINS/hello_world.out | grep UUID | cut -d' ' -f2)
worker_uuid_2=$(cat $BINS/goodbye_world.out | grep UUID | cut -d' ' -f2)
assert_equals "$worker_uuid_1" "$worker_uuid_2"
}
function test_multiple_flagfiles() {
prepare_example_worker
cat >>BUILD <<EOF
work(
name = "multi_hello_world",
worker = ":worker",
worker_args = ["--worker_protocol=${WORKER_PROTOCOL}"],
args = ["hello", "world", "nice", "to", "meet", "you"],
multiflagfiles = True,
)
EOF
bazel build :multi_hello_world &> $TEST_log \
|| fail "build failed"
assert_equals "hello world nice to meet you" "$(cat $BINS/multi_hello_world.out)"
}
function test_workers_quit_after_build() {
prepare_example_worker
cat >>BUILD <<EOF
[work(
name = "hello_world_%s" % idx,
worker = ":worker",
worker_args = ["--worker_protocol=${WORKER_PROTOCOL}"],
args = ["--write_counter"],
) for idx in range(10)]
EOF
bazel build --worker_quit_after_build :hello_world_1 &> $TEST_log \
|| fail "build failed"
work_count=$(cat $BINS/hello_world_1.out | grep COUNTER | cut -d' ' -f2)
assert_equals "1" $work_count
bazel build --worker_quit_after_build :hello_world_2 &> $TEST_log \
|| fail "build failed"
work_count=$(cat $BINS/hello_world_2.out | grep COUNTER | cut -d' ' -f2)
# If the worker hadn't quit as we told it, it would have been reused, causing this to be a "2".
assert_equals "1" $work_count
}
function test_build_succeeds_even_if_worker_exits() {
prepare_example_worker
cat >>BUILD <<EOF
[work(
name = "hello_world_%s" % idx,
worker = ":worker",
worker_args = ["--exit_after=1", "--worker_protocol=${WORKER_PROTOCOL}"],
args = ["--write_uuid", "--write_counter"],
) for idx in range(10)]
EOF
# The worker dies after finishing the action, so the build succeeds.
bazel build --worker_verbose :hello_world_1 &> $TEST_log \
|| fail "build failed"
# This time, the worker is dead before the build starts, so a new one is made.
bazel build --worker_verbose :hello_world_2 &> $TEST_log \
|| fail "build failed"
expect_log "Work worker (id [0-9]\+) has unexpectedly died with exit code 0."
}
function test_build_fails_if_worker_dies_during_action() {
prepare_example_worker
cat >>BUILD <<EOF
[work(
name = "hello_world_%s" % idx,
worker = ":worker",
worker_args = ["--worker_protocol=${WORKER_PROTOCOL}","--exit_during=1"],
args = [
"--write_uuid",
"--write_counter",
],
) for idx in range(10)]
EOF
bazel build --worker_verbose :hello_world_1 &> $TEST_log \
&& fail "expected build to fail" || true
expect_log "Worker process did not return a WorkResponse:"
}
function test_worker_restarts_when_worker_binary_changes() {
prepare_example_worker
cat >>BUILD <<EOF
[work(
name = "hello_world_%s" % idx,
worker = ":worker",
worker_args = ["--worker_protocol=${WORKER_PROTOCOL}"],
args = ["--write_uuid", "--write_counter"],
) for idx in range(10)]
EOF
echo "First run" >> $TEST_log
bazel build :hello_world_1 &> $TEST_log \
|| fail "build failed"
worker_uuid_1=$(cat $BINS/hello_world_1.out | grep UUID | cut -d' ' -f2)
work_count=$(cat $BINS/hello_world_1.out | grep COUNTER | cut -d' ' -f2)
assert_equals "1" $work_count
echo "Second run" >> $TEST_log
bazel build :hello_world_2 &> $TEST_log \
|| fail "build failed"
worker_uuid_2=$(cat $BINS/hello_world_2.out | grep UUID | cut -d' ' -f2)
work_count=$(cat $BINS/hello_world_2.out | grep COUNTER | cut -d' ' -f2)
assert_equals "2" $work_count
# Check that the same worker was used twice.
assert_equals "$worker_uuid_1" "$worker_uuid_2"
# Modify the example worker jar to trigger a rebuild of the worker.
tr -cd '[:alnum:]' < /dev/urandom | head -c32 > dummy_file
zip worker_lib.jar dummy_file
rm dummy_file
bazel build :hello_world_3 &> $TEST_log \
|| fail "build failed"
worker_uuid_3=$(cat $BINS/hello_world_3.out | grep UUID | cut -d' ' -f2)
work_count=$(cat $BINS/hello_world_3.out | grep COUNTER | cut -d' ' -f2)
assert_equals "1" $work_count
expect_log "worker .* can no longer be used, because its files have changed on disk"
expect_log "worker_lib.jar: .* -> .*"
# Check that we used a new worker.
assert_not_equals "$worker_uuid_2" "$worker_uuid_3"
}
function test_worker_restarts_when_worker_runfiles_change() {
prepare_example_worker
cat >>BUILD <<EOF
[work(
name = "hello_world_%s" % idx,
worker = ":worker",
worker_args = ["--worker_protocol=${WORKER_PROTOCOL}"],
args = ["--write_uuid", "--write_counter"],
) for idx in range(10)]
EOF
bazel build :hello_world_1 &> $TEST_log \
|| fail "build failed"
worker_uuid_1=$(cat $BINS/hello_world_1.out | grep UUID | cut -d' ' -f2)
work_count=$(cat $BINS/hello_world_1.out | grep COUNTER | cut -d' ' -f2)
assert_equals "1" $work_count
bazel build :hello_world_2 &> $TEST_log \
|| fail "build failed"
worker_uuid_2=$(cat $BINS/hello_world_2.out | grep UUID | cut -d' ' -f2)
work_count=$(cat $BINS/hello_world_2.out | grep COUNTER | cut -d' ' -f2)
assert_equals "2" $work_count
# Check that the same worker was used twice.
assert_equals "$worker_uuid_1" "$worker_uuid_2"
# "worker_data.txt" is included in the "data" attribute of the example worker.
echo "changeddata" > worker_data.txt
bazel build :hello_world_3 &> $TEST_log \
|| fail "build failed"
worker_uuid_3=$(cat $BINS/hello_world_3.out | grep UUID | cut -d' ' -f2)
work_count=$(cat $BINS/hello_world_3.out | grep COUNTER | cut -d' ' -f2)
assert_equals "1" $work_count
expect_log "worker .* can no longer be used, because its files have changed on disk"
expect_log "worker_data.txt: .* -> .*"
# Check that we used a new worker.
assert_not_equals "$worker_uuid_2" "$worker_uuid_3"
}
# When a worker does not conform to the protocol and returns a response that is not a parseable
# protobuf, it must be killed and a helpful error message should be printed.
function test_build_fails_when_worker_returns_junk() {
prepare_example_worker
cat >>BUILD <<EOF
[work(
name = "hello_world_%s" % idx,
worker = ":worker",
worker_args = ["--poison_after=1", "--worker_protocol=${WORKER_PROTOCOL}"],
args = ["--write_uuid", "--write_counter"],
) for idx in range(10)]
EOF
bazel build :hello_world_1 &> $TEST_log \
|| fail "build failed"
# A failing worker should cause the build to fail.
bazel build :hello_world_2 &> $TEST_log \
&& fail "expected build to fail" || true
# Check that a helpful error message was printed.
expect_log "Worker process returned an unparseable WorkResponse!"
expect_log "Did you try to print something to stdout"
expect_log "I'm a poisoned worker and this is not a protobuf."
}
function test_input_digests() {
prepare_example_worker
cat >>BUILD <<EOF
[work(
name = "hello_world_%s" % idx,
worker = ":worker",
worker_args = ["--worker_protocol=${WORKER_PROTOCOL}"],
args = ["--write_uuid", "--print_inputs"],
srcs = [":input.txt"],
) for idx in range(10)]
EOF
echo "hello world" > input.txt
bazel build :hello_world_1 &> $TEST_log \
|| fail "build failed"
worker_uuid_1=$(cat $BINS/hello_world_1.out | grep UUID | cut -d' ' -f2)
hash1=$(egrep "INPUT .*/input.txt " $BINS/hello_world_1.out | cut -d' ' -f3)
bazel build :hello_world_2 >> $TEST_log 2>&1 \
|| fail "build failed"
worker_uuid_2=$(cat $BINS/hello_world_2.out | grep UUID | cut -d' ' -f2)
hash2=$(egrep "INPUT .*/input.txt " $BINS/hello_world_2.out | cut -d' ' -f3)
assert_equals "$worker_uuid_1" "$worker_uuid_2"
assert_equals "$hash1" "$hash2"
echo "changeddata" > input.txt
bazel build :hello_world_3 >> $TEST_log 2>&1 \
|| fail "build failed"
worker_uuid_3=$(cat $BINS/hello_world_3.out | grep UUID | cut -d' ' -f2)
hash3=$(egrep "INPUT .*/input.txt " $BINS/hello_world_3.out | cut -d' ' -f3)
assert_equals "$worker_uuid_2" "$worker_uuid_3"
assert_not_equals "$hash2" "$hash3"
}
function test_worker_verbose() {
prepare_example_worker
cat >>BUILD <<EOF
[work(
name = "hello_world_%s" % idx,
worker = ":worker",
worker_args = ["--worker_protocol=${WORKER_PROTOCOL}"],
args = ["--write_uuid", "--write_counter"],
) for idx in range(10)]
EOF
bazel build --worker_quit_after_build :hello_world_1 &> $TEST_log \
|| fail "build failed"
expect_log "Created new ${WORKER_TYPE_LOG_STRING} Work worker (id [0-9]\+)"
expect_log "Destroying Work worker (id [0-9]\+)"
expect_log "Build completed, shutting down worker pool..."
}
function test_logs_are_deleted_on_server_restart() {
prepare_example_worker
cat >>BUILD <<EOF
[work(
name = "hello_world_%s" % idx,
worker = ":worker",
worker_args = ["--worker_protocol=${WORKER_PROTOCOL}"],
args = ["--write_uuid", "--write_counter"],
) for idx in range(10)]
EOF
bazel build --worker_quit_after_build :hello_world_1 &> $TEST_log \
|| fail "build failed"
expect_log "Created new ${WORKER_TYPE_LOG_STRING} Work worker (id [0-9]\+)"
worker_log=$(egrep -o -- 'logging to .*/b(azel|laze)-workers/worker-[0-9]-Work.log' "$TEST_log" | sed 's/^logging to //')
[ -e "$worker_log" ] \
|| fail "Worker log was not found"
# Running a build after a server shutdown should trigger the removal of old worker log files.
bazel shutdown &> $TEST_log
bazel build &> $TEST_log
[ ! -e "$worker_log" ] \
|| fail "Worker log was not deleted"
}
function test_requires_worker_protocol_missing_defaults_to_proto {
prepare_example_worker
cat >>BUILD <<EOF
work(
name = "hello_world_proto",
worker = ":worker",
worker_args = ["--worker_protocol=proto"],
args = ["hello world"],
)
work(
name = "hello_world_json",
worker = ":worker",
worker_args = ["--worker_protocol=json"],
)
EOF
sed -i.bak 's/=execution_requirements/={"supports-workers": "1"}/g' work.bzl
rm -f work.bzl.bak
bazel build :hello_world_proto &> $TEST_log \
|| fail "build failed"
assert_equals "hello world" "$(cat $BINS/hello_world_proto.out)"
bazel build :hello_world_json &> $TEST_log \
&& fail "expected proto build with json worker to fail" || true
}
function test_missing_execution_requirements_fallback_to_standalone() {
prepare_example_worker
# This test ignores the WORKER_PROTOCOL test arg since it doesn't use the
# persistent worker when execution falls back to standalone.
cat >>BUILD <<EOF
work(
name = "hello_world",
worker = ":worker",
args = ["--write_uuid", "--write_counter"],
)
EOF
sed -i.bak '/execution_requirements=execution_requirements/d' work.bzl
rm -f work.bzl.bak
bazel build --worker_quit_after_build :hello_world &> $TEST_log \
|| fail "build failed"
expect_not_log "Created new ${WORKER_TYPE_LOG_STRING} Work worker (id [0-9]\+)"
expect_not_log "Destroying Work worker (id [0-9]\+)"
# WorkerSpawnStrategy falls back to standalone strategy, so we still expect the output to be generated.
[ -e "$BINS/hello_world.out" ] \
|| fail "Worker did not produce output"
}
function test_environment_is_clean() {
prepare_example_worker
cat >>BUILD <<EOF
work(
name = "hello_world",
worker = ":worker",
worker_args = ["--worker_protocol=${WORKER_PROTOCOL}"],
args = ["--print_env"],
)
EOF
bazel shutdown &> $TEST_log \
|| fail "shutdown failed"
CAKE=LIE bazel build --worker_quit_after_build :hello_world &> $TEST_log \
|| fail "build failed"
fgrep CAKE=LIE $BINS/hello_world.out \
&& fail "environment variable leaked into worker env" || true
}
function test_workers_quit_on_clean() {
prepare_example_worker
cat >>BUILD <<EOF
work(
name = "hello_clean",
worker = ":worker",
worker_args = ["--worker_protocol=${WORKER_PROTOCOL}"],
args = ["hello clean"],
)
EOF
bazel build :hello_clean &> $TEST_log \
|| fail "build failed"
assert_equals "hello clean" "$(cat $BINS/hello_clean.out)"
expect_log "Created new ${WORKER_TYPE_LOG_STRING} Work worker (id [0-9]\+)"
bazel clean &> $TEST_log \
|| fail "clean failed"
expect_log "Clean command is running, shutting down worker pool..."
expect_log "Destroying Work worker (id [0-9]\+)"
}
function test_crashed_worker_causes_log_dump() {
prepare_example_worker
cat >>BUILD <<EOF
[work(
name = "hello_world_%s" % idx,
worker = ":worker",
worker_args = [
"--poison_after=1",
"--hard_poison",
"--worker_protocol=${WORKER_PROTOCOL}"
],
args = ["--write_uuid", "--write_counter"],
) for idx in range(10)]
EOF
bazel build :hello_world_1 &> $TEST_log \
|| fail "build failed"
bazel build :hello_world_2 &> $TEST_log \
&& fail "expected build to fail" || true
expect_log "^---8<---8<--- Start of log, file at /"
expect_log "Worker process did not return a WorkResponse:"
expect_log "I'm a very poisoned worker and will just crash."
expect_log "^---8<---8<--- End of log ---8<---8<---"
}
run_suite "Worker integration tests"
|
cushon/bazel
|
src/test/shell/integration/bazel_worker_test.sh
|
Shell
|
apache-2.0
| 21,185 |
#!/bin/bash
#
# Copyright (C) 2010, 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
set -e
set -o pipefail
export PYTHON=${PYTHON:=python3}
CCE=tools/check-cert-expired
err() {
echo "$@"
echo 'Aborting'
exit 1
}
impexpd_helper() {
$PYTHON "${TOP_SRCDIR:-.}/test/py/import-export_unittest-helper" "$@"
}
$CCE 2>/dev/null && err 'Accepted empty argument list'
$CCE foo bar 2>/dev/null && err 'Accepted more than one argument'
$CCE foo bar baz 2>/dev/null && err 'Accepted more than one argument'
tmpdir=$(mktemp -d)
trap "rm -rf $tmpdir" EXIT
[[ -f "$tmpdir/cert-not" ]] && err 'File existed when it should not'
$CCE $tmpdir/cert-not 2>/dev/null && err 'Accepted non-existent file'
VALIDITY=1 impexpd_helper $tmpdir/cert-valid gencert
$CCE $tmpdir/cert-valid 2>/dev/null && \
err 'Reported valid certificate as expired'
VALIDITY=-50 impexpd_helper $tmpdir/cert-expired gencert
$CCE $tmpdir/cert-expired 2>/dev/null || \
err 'Reported expired certificate as valid'
echo > $tmpdir/cert-invalid
$CCE $tmpdir/cert-invalid 2>/dev/null && \
err 'Reported invalid certificate as expired'
echo 'Hello World' > $tmpdir/cert-invalid2
$CCE $tmpdir/cert-invalid2 2>/dev/null && \
err 'Reported invalid certificate as expired'
exit 0
|
ganeti/ganeti
|
test/py/check-cert-expired_unittest.bash
|
Shell
|
bsd-2-clause
| 2,520 |
#!/usr/bin/env sh
# Note: pipefail is not supported in POSIX shell and will be silently ignored, unless bash is used
set -eo pipefail
# Do some tests and exit with either 0 for healthy or 1 for unhealthy
echo "Fix me or this docker-healthcheck.sh will never succeed!"
false || exit 1
exit 0
|
edannenberg/kubler
|
template/docker/image/docker-healthcheck.sh
|
Shell
|
bsd-2-clause
| 294 |
#!/usr/bin/env bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This script is invoked by Jenkins and triggers a test run based on
# env variable settings.
#
# Setting up rvm environment BEFORE we set -ex.
[[ -s /etc/profile.d/rvm.sh ]] && . /etc/profile.d/rvm.sh
# To prevent cygwin bash complaining about empty lines ending with \r
# we set the igncr option. The option doesn't exist on Linux, so we fallback
# to just 'set -ex' there.
# NOTE: No empty lines should appear in this file before igncr is set!
set -ex -o igncr || set -ex
# Grabbing the machine's architecture
arch=`uname -m`
case $platform in
i386)
arch="i386"
platform="linux"
docker_suffix=_32bits
;;
esac
if [ "$platform" == "linux" ]
then
echo "building $language on Linux"
./tools/run_tests/run_tests.py --use_docker -t -l $language -c $config -x report.xml $@ || true
elif [ "$platform" == "windows" ]
then
echo "building $language on Windows"
# Prevent msbuild from picking up "platform" env variable, which would break the build
unset platform
python tools/run_tests/run_tests.py -t -l $language -x report.xml $@ || true
elif [ "$platform" == "macos" ]
then
echo "building $language on MacOS"
./tools/run_tests/run_tests.py -t -l $language -c $config -x report.xml $@ || true
elif [ "$platform" == "freebsd" ]
then
echo "building $language on FreeBSD"
MAKE=gmake ./tools/run_tests/run_tests.py -t -l $language -c $config -x report.xml $@ || true
elif [ "$platform" == "interop" ]
then
echo "building interop tests for language $language"
./tools/run_tests/run_interop_tests.py --use_docker -t -l $language --cloud_to_prod --server all || true
else
echo "Unknown platform $platform"
exit 1
fi
if [ ! -e reports/index.html ]
then
mkdir -p reports
echo 'No reports generated.' > reports/index.html
fi
|
w4-sjcho/grpc
|
tools/jenkins/run_jenkins.sh
|
Shell
|
bsd-3-clause
| 3,324 |
#
# This file is part of the CernVM File System
# This script takes care of creating, removing, and maintaining repositories
# on a Stratum 0/1 server
#
# Implementation of the "cvmfs_server import" command
# This file depends on functions implemented in the following files:
# - cvmfs_server_util.sh
# - cvmfs_server_common.sh
# - cvmfs_server_ssl.sh
# - cvmfs_server_apache.sh
# - cvmfs_server_json.sh
# - cvmfs_server_transaction.sh
# - cvmfs_server_publish.sh
# - cvmfs_server_masterkeycard.sh
IMPORT_DESASTER_REPO_NAME=""
IMPORT_DESASTER_MANIFEST_BACKUP=""
IMPORT_DESASTER_MANIFEST_SIGNED=0
_import_desaster_cleanup() {
local name="$IMPORT_DESASTER_REPO_NAME"
if [ x"$name" = x"" ]; then
return 0
fi
unmount_and_teardown_repository $name
remove_spool_area $name
remove_config_files $name
if [ $IMPORT_DESASTER_MANIFEST_SIGNED -ne 0 ] && \
[ x$IMPORT_DESASTER_MANIFEST_BACKUP != x"" ]; then
echo "Manifest was overwritten. If needed here is a backup: $IMPORT_DESASTER_MANIFEST_BACKUP"
fi
}
# This command needs transaction + publish
migrate_legacy_dirtab() {
local name=$1
local dirtab_path="/cvmfs/${name}/.cvmfsdirtab"
local tmp_dirtab=$(mktemp)
cp -f "$dirtab_path" "$tmp_dirtab" || return 1
cvmfs_server_transaction $name > /dev/null || return 2
cat "$tmp_dirtab" | sed -e 's/\(.*\)/\1\/\*/' > $dirtab_path || return 3
cvmfs_server_publish $name > /dev/null || return 4
rm -f "$tmp_dirtab" || return 5
}
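# Usage sketch (repository name is illustrative only):
#   migrate_legacy_dirtab example.cern.ch
# opens a transaction, rewrites every .cvmfsdirtab line "/dir" to "/dir/*",
# and publishes the result.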
cvmfs_server_import() {
local name
local stratum0
local keys_location="/etc/cvmfs/keys"
local upstream
local owner
local file_ownership
local is_legacy=0
local show_statistics=0
local replicable=0
local chown_backend=0
local unionfs=
local recreate_whitelist=0
local configure_apache=1
local recreate_repo_key=0
local require_masterkeycard=0
# parameter handling
OPTIND=1
while getopts "w:o:c:u:k:lsmgf:rptR" option; do
case $option in
w)
stratum0=$OPTARG
;;
o)
owner=$OPTARG
;;
c)
file_ownership=$OPTARG
;;
u)
upstream=$OPTARG
;;
k)
keys_location=$OPTARG
;;
l)
is_legacy=1
;;
s)
show_statistics=1
;;
m)
replicable=1
;;
g)
chown_backend=1
;;
f)
unionfs=$OPTARG
;;
r)
recreate_whitelist=1
;;
p)
configure_apache=0
;;
t)
recreate_repo_key=1
;;
R)
recreate_whitelist=1
require_masterkeycard=1
;;
?)
shift $(($OPTIND-2))
usage "Command import: Unrecognized option: $1"
;;
esac
done
# get repository name
shift $(($OPTIND-1))
check_parameter_count 1 $#
name=$(get_repository_name $1)
# default values
[ x"$stratum0" = x ] && stratum0="$(mangle_local_cvmfs_url $name)"
[ x"$upstream" = x ] && upstream=$(make_local_upstream $name)
[ x"$unionfs" = x ] && unionfs="$(get_available_union_fs)"
local private_key="${name}.key"
local master_key="${name}.masterkey"
local certificate="${name}.crt"
local public_key="${name}.pub"
# sanity checks
check_repository_existence $name && die "The repository $name already exists"
is_root || die "Only root can create a new repository"
check_upstream_validity $upstream
check_cvmfs2_client || die "cvmfs client missing"
check_autofs_on_cvmfs && die "Autofs on /cvmfs has to be disabled"
check_apache || die "Apache must be installed and running"
is_local_upstream $upstream || die "Import only works locally for the moment"
ensure_swissknife_suid $unionfs || die "Need CAP_SYS_ADMIN for cvmfs_swissknife"
lower_hardlink_restrictions
ensure_enabled_apache_modules
[ x"$keys_location" = "x" ] && die "Please provide the location of the repository security keys (-k)"
if [ $unionfs = "overlayfs" ]; then
local msg
msg="`check_overlayfs_version`" || die "$msg"
echo "Warning: CernVM-FS filesystems using overlayfs may not enforce hard link semantics during publishing."
else
check_aufs || die "aufs kernel module missing"
fi
# repository owner dialog
local cvmfs_user=$(get_cvmfs_owner $name $owner)
check_user $cvmfs_user || die "No user $cvmfs_user"
[ x"$file_ownership" = x ] && file_ownership="$(id -u $cvmfs_user):$(id -g $cvmfs_user)"
echo $file_ownership | grep -q "^[0-9][0-9]*:[0-9][0-9]*$" || die "Unrecognized file ownership: $file_ownership | expected: <uid>:<gid>"
local cvmfs_uid=$(echo $file_ownership | cut -d: -f1)
local cvmfs_gid=$(echo $file_ownership | cut -d: -f2)
# investigate the given repository storage for sanity
local storage_location=$(get_upstream_config $upstream)
local needed_items="${storage_location} \
${storage_location}/.cvmfspublished \
${storage_location}/data \
${storage_location}/data/txn"
local i=0
while [ $i -lt 256 ]; do
needed_items="$needed_items ${storage_location}/data/$(printf "%02x" $i)"
i=$(($i+1))
done
for item in $needed_items; do
[ -e $item ] || die "$item missing"
[ $chown_backend -ne 0 ] || [ x"$cvmfs_user" = x"$(stat -c%U $item)" ] || die "$item not owned by $cvmfs_user"
done
# check availability of repository signing key and certificate
local keys="$public_key"
if [ $recreate_repo_key -eq 0 ]; then
if [ ! -f ${keys_location}/${private_key} ] || \
[ ! -f ${keys_location}/${certificate} ]; then
die "repository signing key or certificate not found (use -t maybe?)"
fi
keys="$keys $private_key $certificate"
else
[ $recreate_whitelist -ne 0 ] || die "using -t implies whitelist recreation (use -r maybe?)"
fi
# check whitelist expiry date
if [ $recreate_whitelist -eq 0 ]; then
cvmfs_sys_file_is_regular "${storage_location}/.cvmfswhitelist" || die "didn't find ${storage_location}/.cvmfswhitelist"
local expiry=$(get_expiry_from_string "$(cat "${storage_location}/.cvmfswhitelist")")
[ $expiry -gt 0 ] || die "Repository whitelist expired (use -r maybe?)"
elif [ $require_masterkeycard -eq 1 ]; then
local reason
reason="`masterkeycard_cert_available`" || die "masterkeycard not available to create whitelist: $reason"
elif ! cvmfs_sys_file_is_regular ${keys_location}/${master_key}; then
masterkeycard_cert_available >/dev/null || die "Neither masterkey nor masterkeycard found for recreating whitelist"
fi
# set up desaster cleanup
IMPORT_DESASTER_REPO_NAME="$name"
trap _import_desaster_cleanup EXIT HUP INT QUIT TERM
# create the configuration for the new repository
# TODO(jblomer): make a better guess for hash and compression algorithm (see
# also reflog creation)
echo -n "Creating configuration files... "
create_config_files_for_new_repository "$name" \
"$upstream" \
"$stratum0" \
"$cvmfs_user" \
"$unionfs" \
"sha1" \
"true" \
"false" \
"$configure_apache" \
"default" \
"false" \
"" \
"" || die "fail!"
echo "done"
# import the old repository security keys
echo -n "Importing the given key files... "
if [ $require_masterkeycard -eq 0 ] && \
cvmfs_sys_file_is_regular ${keys_location}/${master_key} ; then
keys="$keys $master_key"
fi
import_keychain $name "$keys_location" $cvmfs_user "$keys" > /dev/null || die "fail!"
echo "done"
# create storage
echo -n "Creating CernVM-FS Repository Infrastructure... "
create_spool_area_for_new_repository $name || die "fail!"
if [ $configure_apache -eq 1 ]; then
reload_apache > /dev/null || die "fail!"
fi
echo "done"
# create reflog checksum
if cvmfs_sys_file_is_regular ${storage_location}/.cvmfsreflog ; then
echo -n "Re-creating reflog content hash... "
local reflog_hash=$(cat ${storage_location}/.cvmfsreflog | cvmfs_swissknife hash -a sha1)
echo -n $reflog_hash > "${CVMFS_SPOOL_DIR}/reflog.chksum"
chown $CVMFS_USER "${CVMFS_SPOOL_DIR}/reflog.chksum"
echo $reflog_hash
fi
# load repository configuration file
load_repo_config $name
local temp_dir="${CVMFS_SPOOL_DIR}/tmp"
# import storage location
if [ $chown_backend -ne 0 ]; then
echo -n "Importing CernVM-FS storage... "
chown -R $cvmfs_user $storage_location || die "fail!"
set_selinux_httpd_context_if_needed $storage_location || die "fail!"
echo "done"
fi
# Let Apache finish reload (needs to happen after SElinux adjustment)
if [ $configure_apache -eq 1 ]; then
wait_for_apache "${stratum0}/.cvmfswhitelist" || die "fail (Apache configuration)"
fi
# creating a new repository signing key if requested
if [ $recreate_repo_key -ne 0 ]; then
echo -n "Creating new repository signing key... "
local manifest_url="${CVMFS_STRATUM0}/.cvmfspublished"
local unsigned_manifest="${CVMFS_SPOOL_DIR}/tmp/unsigned_manifest"
create_cert $name $CVMFS_USER || die "fail (certificate creation)!"
local old_manifest
old_manifest="`get_item $name $manifest_url`" || die "fail (manifest download)!"
echo "$old_manifest" | strip_manifest_signature - > $unsigned_manifest \
|| die "fail (manifest signature strip)!"
chown $CVMFS_USER $unsigned_manifest || die "fail (manifest chown)!"
sign_manifest $name $unsigned_manifest || die "fail (manifest resign)!"
echo "done"
fi
# recreate whitelist if requested
if [ $recreate_whitelist -ne 0 ]; then
create_whitelist $name $CVMFS_USER \
${CVMFS_UPSTREAM_STORAGE} \
${CVMFS_SPOOL_DIR}/tmp || die "fail!"
fi
# migrate old catalogs
if [ $is_legacy -ne 0 ]; then
echo "Migrating old catalogs (may take a while)... "
local new_manifest="${temp_dir}/new_manifest"
local statistics_flag
if [ $show_statistics -ne 0 ]; then
statistics_flag="-s"
fi
IMPORT_DESASTER_MANIFEST_BACKUP="${storage_location}/.cvmfspublished.bak"
cp ${storage_location}/.cvmfspublished \
$IMPORT_DESASTER_MANIFEST_BACKUP || die "fail! (cannot backup .cvmfspublished)"
__swissknife migrate \
-v "2.0.x" \
-r $storage_location \
-n $name \
-u $upstream \
-t $temp_dir \
-k "/etc/cvmfs/keys/$public_key" \
-o $new_manifest \
-p $cvmfs_uid \
-g $cvmfs_gid \
-f \
$statistics_flag || die "fail! (migration)"
chown $cvmfs_user $new_manifest || die "fail! (chown manifest)"
# sign new (migrated) repository revision
echo -n "Signing newly imported Repository... "
local user_shell="$(get_user_shell $name)"
sign_manifest $name $new_manifest || die "fail! (cannot sign repo)"
IMPORT_DESASTER_MANIFEST_SIGNED=1
echo "done"
fi
# do final setup
echo -n "Mounting CernVM-FS Storage... "
setup_and_mount_new_repository $name || die "fail!"
echo "done"
# the .cvmfsdirtab semantics might need an update
if [ $is_legacy -ne 0 ] && cvmfs_sys_file_is_regular /cvmfs/${name}/.cvmfsdirtab ; then
echo -n "Migrating .cvmfsdirtab... "
migrate_legacy_dirtab $name || die "fail!"
echo "done"
fi
# make stratum0 repository replicable if requested
if [ $replicable -eq 1 ]; then
cvmfs_server_alterfs -m on $name
fi
echo -n "Updating global JSON information... "
update_global_repository_info && echo "done" || echo "fail"
# reset trap and finish
trap - EXIT HUP INT QUIT TERM
print_new_repository_notice $name $cvmfs_user 1
# print warning if OverlayFS is used for repository management
if [ x"$CVMFS_UNION_FS_TYPE" = x"overlayfs" ]; then
echo ""
echo "WARNING: You are using OverlayFS which cannot handle hard links."
echo " If the imported repository '${name}' used to be based on"
echo " AUFS, please run the following command NOW to remove hard"
echo " links from the catalogs:"
echo ""
echo " cvmfs_server eliminate-hardlinks ${name}"
echo ""
fi
}
|
Gangbiao/cvmfs
|
cvmfs/server/cvmfs_server_import.sh
|
Shell
|
bsd-3-clause
| 13,144 |
ck rm experiment:demo-autotune-flags-susan-linux-*
|
ctuning/ck-autotuning
|
demo/autotuning-compiler-flags-susan-linux/_clean_autotune_entries.sh
|
Shell
|
bsd-3-clause
| 51 |
#!/bin/sh
echo "Generating a new SSH key for GitHub..."
# Generating a new SSH key
# https://docs.github.com/en/github/authenticating-to-github/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent#generating-a-new-ssh-key
ssh-keygen -t ed25519 -C $1 -f ~/.ssh/id_ed25519
# Adding your SSH key to the ssh-agent
# https://docs.github.com/en/github/authenticating-to-github/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent#adding-your-ssh-key-to-the-ssh-agent
eval "$(ssh-agent -s)"
touch ~/.ssh/config
printf 'Host *\n AddKeysToAgent yes\n UseKeychain yes\n IdentityFile ~/.ssh/id_ed25519\n' | tee ~/.ssh/config
ssh-add -K ~/.ssh/id_ed25519
# Adding your SSH key to your GitHub account
# https://docs.github.com/en/github/authenticating-to-github/adding-a-new-ssh-key-to-your-github-account
echo "run 'pbcopy < ~/.ssh/id_ed25519.pub' and paste that into GitHub"
|
davosian/dotfiles
|
ssh.sh
|
Shell
|
mit
| 872 |
#Given Kallisto's abundance calculations per SRA run per transcript, grouped into sets originating from the same tissue.
#These grouped calculations are the output of fetchAndRun.sh, symlinked locally named thyroid.all and heart.all for two tissues
#We compute the standard deviation between runs originating from the same tissue for each transcript.
#Heuristically, we discard the quintile with the highest (std_dev(expression)/expression) ratio, i.e. the
# transcripts whose reported abundance is most variable between runs of similar origin. Subsequent analysis
# can be done both on all transcripts and on the intersection of the cleanest 80% of both tissues.
#Then we compute fold change normalized by (sqrt(std_dev_1*std_dev_1 + std_dev_2*std_dev_2)).
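#Worked example (hypothetical numbers): a transcript with mean abundance 10.0 (sd 2.0) in heart and
# 5.0 (sd 1.0) in thyroid gets log fold change log(10.0/5.0) ~= 0.69 and combined spread sqrt(2.0^2 + 1.0^2) ~= 2.24.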
perl -ane '{$sum=0; $sum2=0; for ($i=1;$i<=$#F;$i++){$sum += $F[$i]; $sum2 += $F[$i]*$F[$i]; } $stdev = sqrt( ($sum2-$sum* $sum/($#F))/($#F) ); print $F[0],"\t",$sum/($#F),"\t",$stdev,"\n"}' thyroid.all > thyroid.sum
perl -ane '{$sum=0; $sum2=0; for ($i=1;$i<=$#F;$i++){$sum += $F[$i]; $sum2 += $F[$i]*$F[$i]; } $stdev = sqrt( ($sum2-$sum* $sum/($#F))/($#F) ); print $F[0],"\t",$sum/($#F),"\t",$stdev,"\n"}' heart.all > heart.sum
sort heart.sum > heart.sum.s
sort thyroid.sum > thyroid.sum.s
join heart.sum.s thyroid.sum.s > heart_thyroid.sum
perl -ane '{print $F[0],"\t",$F[2]/(0.0001+$F[1]),"\t",$F[1],"\n"}' heart_thyroid.sum > heart.spread
sort -k 2,2nr heart.spread | sed -n '1,32623p' | grep NM_ | sort -k 3,3n
sort -k 2,2nr heart.spread | sed -n '32623,$p' | sort > heart.clean80
perl -ane '{print $F[0],"\t",$F[4]/(0.0001+$F[3]),"\t",$F[3],"\n"}' heart_thyroid.sum > thymus.spread
sort -k 2,2nr thymus.spread | sed -n '32623,$p' | sort > thymus.clean80
join -o 1.1 thymus.clean80 heart.clean80 > both.clean80
perl -ane '{print $F[0],"\t",log((0.000001+$F[1])/(0.000001+$F[3])),"\t", sqrt($F[2]*$F[2]+$F[4]*$F[4]),"\t",$F[1],"\t",$F[3],"\n"}' heart_thyroid.sum > heart_thyroid.ratio
sort heart_thyroid.sum | join - both.clean80 > heart_thyroid.sum80
perl -ane '{print $F[0],"\t",log((0.000001+$F[1])/(0.000001+$F[3])),"\t", sqrt($F[2]*$F[2]+$F[4]*$F[4]),"\t",$F[1],"\t",$F[3],"\n"}' heart_thyroid.sum80 > heart_thyroid.ratio80
|
NCBI-Hackathons/RNA-seq_Comparison_Pipeline
|
analysis/cleanest_transcript_ratio.sh
|
Shell
|
cc0-1.0
| 2,376 |
INITEX="platex -ini"
LATEX=platex
FORMAT=platex
BIBTEX=jbibtex
FMT=fmt
|
kenhys/tokyodebian-monthly-report
|
image200812/minimaltex/whizzy.sh
|
Shell
|
gpl-2.0
| 71 |
#!/bin/sh
# Copyright (C) 2009-2013 OpenWrt.org
. /lib/functions/leds.sh
. /lib/ar71xx.sh
get_status_led() {
case $(ar71xx_board_name) in
alfa-nx)
status_led="alfa:green:led_8"
;;
all0305)
status_led="eap7660d:green:ds4"
;;
ap132)
status_led="ap132:green:status"
;;
ap136-010|\
ap136-020)
status_led="ap136:green:status"
;;
ap135-020)
status_led="ap135:green:status"
;;
ap81)
status_led="ap81:green:status"
;;
ap83)
status_led="ap83:green:power"
;;
ap96)
status_led="ap96:green:led2"
;;
aw-nr580)
status_led="aw-nr580:green:ready"
;;
bullet-m | rocket-m | nano-m | nanostation-m | nanostation-m-xw)
status_led="ubnt:green:link4"
;;
bxu2000n-2-a1)
status_led="bhu:green:status"
;;
cap4200ag)
status_led="senao:green:pwr"
;;
db120)
status_led="db120:green:status"
;;
dir-505-a1 |\
dir-600-a1 |\
dir-615-e1 |\
dir-615-e4)
status_led="d-link:green:power"
;;
dir-615-c1)
status_led="d-link:green:status"
;;
dir-825-b1)
status_led="d-link:orange:power"
;;
dir-825-c1 |\
dir-835-a1)
status_led="d-link:amber:power"
;;
dragino2)
status_led="dragino2:red:system"
;;
eap300v2)
status_led="engenius:blue:power"
;;
eap7660d)
status_led="eap7660d:green:ds4"
;;
el-mini | \
el-m150)
status_led="EasyLink:green:system"
;;
gl-inet)
status_led="gl-connect:green:lan"
;;
esr1750)
status_led="esr1750:amber:power"
;;
esr900)
status_led="engenius:amber:power"
;;
hiwifi-hc6361)
status_led="hiwifi:blue:system"
;;
hornet-ub)
status_led="alfa:blue:wps"
;;
ja76pf | \
ja76pf2)
status_led="jjplus:green:led1"
;;
ls-sr71)
status_led="ubnt:green:d22"
;;
mr600)
status_led="mr600:orange:power"
;;
mr600v2)
status_led="mr600:blue:power"
;;
mynet-n600 | \
mynet-n750)
status_led="wd:blue:power"
;;
mynet-rext)
status_led="wd:blue:power"
;;
mzk-w04nu | \
mzk-w300nh)
status_led="planex:green:status"
;;
nbg460n_550n_550nh)
status_led="nbg460n:green:power"
;;
nbg6716)
status_led="nbg6716:white:power"
;;
om2p | \
om2pv2 | \
om2p-hs | \
om2p-hsv2 | \
om2p-lc)
status_led="om2p:blue:power"
;;
om5p)
status_led="om5p:blue:power"
;;
pb44)
status_led="pb44:amber:jump1"
;;
rb-2011l|\
rb-2011uas|\
rb-2011uas-2hnd)
status_led="rb:green:usr"
;;
rb-411 | rb-411u | rb-433 | rb-433u | rb-450 | rb-450g | rb-493)
status_led="rb4xx:yellow:user"
;;
rb-750)
status_led="rb750:green:act"
;;
rb-911g-2hpnd|\
rb-911g-5hpnd|\
rb-912uag-2hpnd|\
rb-912uag-5hpnd)
status_led="rb:green:user"
;;
rb-951ui-2hnd)
status_led="rb:green:act"
;;
rb-sxt2n|\
rb-sxt5n)
status_led="rb:green:power"
;;
routerstation | routerstation-pro)
status_led="ubnt:green:rf"
;;
rw2458n)
status_led="rw2458n:green:d3"
;;
smart-300)
status_led="nc-link:green:system"
;;
oolite)
status_led="oolite:red:system"
;;
tew-632brp)
status_led="tew-632brp:green:status"
;;
tew-673gru)
status_led="trendnet:blue:wps"
;;
tew-712br|\
tew-732br)
status_led="trendnet:green:power"
;;
tl-mr3020)
status_led="tp-link:green:wps"
;;
tl-wa750re)
status_led="tp-link:orange:re"
;;
tl-wa850re)
status_led="tp-link:blue:re"
;;
tl-wa860re)
status_led="tp-link:green:power"
;;
tl-mr3220 | \
tl-mr3220-v2 | \
tl-mr3420 | \
tl-mr3420-v2 | \
tl-wa701nd-v2 | \
tl-wa801nd-v2 | \
tl-wa901nd | \
tl-wa901nd-v2 | \
tl-wa901nd-v3 | \
tl-wdr3500 | \
tl-wr1041n-v2 | \
tl-wr1043nd | \
tl-wr1043nd-v2 | \
tl-wr741nd | \
tl-wr741nd-v4 | \
tl-wr841n-v1 | \
tl-wr841n-v7 | \
tl-wr841n-v8 | \
tl-wa830re-v2 | \
tl-wr842n-v2 | \
tl-wr941nd)
status_led="tp-link:green:system"
;;
archer-c5 | \
archer-c7 | \
tl-wdr4900-v2 | \
tl-mr10u | \
tl-mr13u | \
tl-wdr4300 | \
tl-wr703n | \
tl-wr710n | \
gr-wr001 | \
tl-wr720n-v3)
status_led="tp-link:blue:system"
;;
tl-wr841n-v9)
status_led="tp-link:green:qss"
;;
tl-wr2543n)
status_led="tp-link:green:wps"
;;
tube2h)
status_led="alfa:green:signal4"
;;
unifi)
status_led="ubnt:green:dome"
;;
uap-pro)
status_led="ubnt:white:dome"
;;
airgateway)
status_led="ubnt:white:status"
;;
whr-g301n | \
whr-hp-g300n | \
whr-hp-gn | \
wzr-hp-g300nh)
status_led="buffalo:green:router"
;;
wlae-ag300n)
status_led="buffalo:green:status"
;;
wzr-hp-ag300h | \
wzr-hp-g300nh2)
status_led="buffalo:red:diag"
;;
wndap360 | \
wndr3700 | \
wndr3700v4 | \
wndr4300 | \
wnr2000 | \
wnr2200 |\
wnr612-v2)
status_led="netgear:green:power"
;;
wp543)
status_led="wp543:green:diag"
;;
wrt400n)
status_led="wrt400n:blue:wps"
;;
wrt160nl)
status_led="wrt160nl:blue:wps"
;;
zcn-1523h-2 | zcn-1523h-5)
status_led="zcn-1523h:amber:init"
;;
wlr8100)
status_led="sitecom:amber:status"
;;
esac
}
set_state() {
get_status_led
case "$1" in
preinit)
status_led_blink_preinit
;;
failsafe)
status_led_blink_failsafe
;;
done)
status_led_on
;;
esac
}
|
lxl1140989/dmsdk
|
target/linux/ar71xx/base-files/etc/diag.sh
|
Shell
|
gpl-2.0
| 4,976 |
#!/bin/sh
set -e
export LANG=C
iface="$1"
mac=$(/sbin/ifconfig "$iface" | sed -n -e '/^.*HWaddr \([:[:xdigit:]]*\).*/{s//\1/;y/ABCDEF/abcdef/;p;q;}')
which=""
while read testmac scheme; do
if [ "$which" ]; then continue; fi
if [ "$mac" = "$(echo "$testmac" | sed -e 'y/ABCDEF/abcdef/')" ]; then which="$scheme"; fi
done
if [ "$which" ]; then echo $which; exit 0; fi
exit 1
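# Usage sketch: ifupdown runs this as a mapping script, passing the physical interface as $1 and the
# "MAC logical-name" pairs on stdin. A hypothetical /etc/network/interfaces stanza would look like:
# mapping eth0
#     script /usr/share/doc/ifupdown/examples/get-mac-address.sh
#     map 02:23:45:3c:45:3c home
#     map 00:a3:03:63:26:93 work
# (the MAC addresses and scheme names above are made-up placeholders)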
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/doc/ifupdown/examples/get-mac-address.sh
|
Shell
|
gpl-3.0
| 380 |
#!/bin/bash
PATHTEST='./src/test/java/es/tfg/ejemplos/CalculadoraTest.java'
RESULTADOS='./resultsTests'
if [ -e ./$RESULTADOS ] ; then
rm -r ./$RESULTADOS;
fi
mkdir $RESULTADOS
for testFile in $(ls ./tests)
do
echo $testFile
rm $PATHTEST
cp ./tests/$testFile $PATHTEST
sh run.sh
cp ./results/result.txt $RESULTADOS/result_$testFile.txt
done
echo "Terminado"
|
cristhro/MutationTesting
|
proyectosPIT/Figuras_v2/ejecutarTests.sh
|
Shell
|
gpl-3.0
| 372 |
if [ "x${SDKDIR}" = "x" ]; then
echo "You need to set the SDKDIR environment variable in order to use this script! Nothing will work until you do."
fi
if [ "x${NDKDIR}" = "x" ]; then
echo "You need to set the NDKDIR environment variable in order to use this script! Nothing will work until you do."
fi
if [ "x${GCSDKDIR}" = "x" ]; then
echo "You need to set the GCSDKDIR environment variable in order to use this script! Nothing will work until you do."
fi
PATH="${SDKDIR}/platform-tools:${SDKDIR}/tools:${NDKDIR}:${PATH}"
function readmanifest () {
local source
source=$(cat <<END
import json,sys;
o = json.load(open(sys.argv[1]));
try:
exec("print(o['" + "']['".join(sys.argv[2].split('.')) + "'])");
except KeyError:
pass
END
)
echo `python -c "$source" manifest.json "$1"`
}
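# Usage sketch (keys assumed to exist in manifest.json; nested keys use dot notation):
#   shortname=$(readmanifest shortName)
#   studio=$(readmanifest studio.domain)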
function serve () {
if [ ! -e "manifest.json" ]; then echo "You must be inside a project to do that!"; return -1; fi
source ${GCSDKDIR}/gc_env/bin/activate
tealeaf serve
deactivate
return 0
}
function deploy () {
local studio
local debug
if [ ! -e "manifest.json" ]; then echo "You must be inside a project to do that!"; return -1; fi
source "${GCSDKDIR}/gc_env/bin/activate"
tealeaf deploy --target android $*
if [ $? -ne 0 ]; then
echo "Deploy failed!"
return -1
fi
deactivate
return 0
}
function debug () {
if [ ! -e "AndroidManifest.xml" ]; then echo "You must be inside an Android project directory to do that!"; return -1; fi
rm -rf libs jni obj
for dir in jni obj libs; do
ln -s "${ANDROIDDIR}/TeaLeaf/${dir}" "${dir}"
done
ndk-gdb --start --force
rm -rf libs jni obj
}
function install () {
local apk
local shortname
if [ ! -e "manifest.json" ]; then echo "You must be inside a project to do that!"; return -1; fi
# does the apk exist already?
shortname=`readmanifest shortName`
apk="build/${shortname}.apk"
if [ ! -e "${apk}" ]; then
# nope, run deploy
deploy
if [ $? -ne 0 ]; then
echo "Install failed!"
return -1
fi
fi
adb install ${apk}
return $?
}
function uninstall () {
local pkg
if [ ! -e "manifest.json" ]; then echo "You must be inside a project to do that!"; return -1; fi
# TODO support other studio names
shortname=`readmanifest shortName`
    pkg="cat.wee.${shortname}"
adb uninstall ${pkg}
return $?
}
function reinstall () {
uninstall
install
}
|
hashcube/native-android
|
sdktools.sh
|
Shell
|
gpl-3.0
| 2,326 |
#!/usr/bin/env bash
# Use this script to pin the commit used by the developments tracked by the CI
OVERLAYS="./dev/ci/ci-basic-overlay.sh"
process_development() {
local DEV=$1
local REPO_VAR="${DEV}_CI_GITURL"
local REPO=${!REPO_VAR}
local BRANCH_VAR="${DEV}_CI_REF"
local BRANCH=${!BRANCH_VAR}
if [[ -z "$BRANCH" ]]
then
echo "$DEV has no branch set, skipping"
return 0
fi
if [[ $BRANCH =~ ^[a-f0-9]{40}$ ]]
then
echo "$DEV is already set to hash $BRANCH, skipping"
return 0
fi
echo "Resolving $DEV as $BRANCH from $REPO"
local HASH=$(git ls-remote --heads $REPO $BRANCH | cut -f 1)
if [[ -z "$HASH" ]]
then
echo "Could not resolve reference $BRANCH for $DEV (something went wrong), skipping"
return 0
fi
read -p "Expand $DEV from $BRANCH to $HASH? [y/N] " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
# use -i.bak to be compatible with MacOS; see, e.g., https://stackoverflow.com/a/7573438/377022
sed -i.bak -e "s/$BRANCH_VAR:=$BRANCH/$BRANCH_VAR:=$HASH/" $OVERLAYS
fi
}
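# Example of the rewrite performed above (hypothetical project and hash): a line such as
#   mathcomp_CI_REF:=master
# becomes
#   mathcomp_CI_REF:=0123456789abcdef0123456789abcdef01234567
# in ci-basic-overlay.sh once the user confirms.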
# Execute the script to set the overlay variables
. $OVERLAYS
for project in ${projects[@]}
do
process_development $project
done
|
silene/coq
|
dev/tools/pin-ci.sh
|
Shell
|
lgpl-2.1
| 1,181 |
sudo apt-get update
sudo apt-get install -y ant openjdk-8-jdk; git clone https://github.com/fossasia/susi_server.git susi_server
cd susi_server;
sed -i.bak 's/^\(port.http=\).*/\180/' conf/config.properties
sed -i.bak 's/^\(port.https=\).*/\1443/' conf/config.properties
sed -i.bak 's/^\(upgradeInterval=\).*/\186400000000/' conf/config.properties
ant
bin/start.sh
|
fazeem84/susi_server
|
cloud9-setup.sh
|
Shell
|
lgpl-2.1
| 393 |
#!/bin/bash
cd "$(dirname $0)"
../../commons/show_data_info.sh $*
|
mF2C/COMPSs
|
performance_analysis/auto-cbm/scripts/get-results/cbmi/formatOut/cbm3/bin/show_data_info.sh
|
Shell
|
apache-2.0
| 65 |
#!/bin/bash
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
set -x
export MAKEFLAGS=-j4
export PYTHON=/usr/local/python25/bin/python
echo "========= autogen.sh"
./autogen.sh || exit $?
echo "========= configure"
# --with-junit=/usr/share/java/junit.jar
# --with-jdk=/usr/lib/jvm/java-1.6.0-openjdk-1.6.0.0.x86_64 \
# --without-berkeley-db \
./configure --enable-javahl --enable-maintainer-mode \
--with-neon=/usr \
--with-serf=/usr/local \
--with-apxs=/usr/sbin/apxs \
--with-berkeley-db \
--with-apr=/usr \
--with-apr-util=/usr \
--with-jdk=/opt/java/jdk1.6.0_15 \
--with-junit=/home/bt/junit-4.4.jar \
--with-sqlite=/home/bt/packages/sqlite-amalgamation-dir/sqlite3.c \
|| exit $?
echo "========= make"
make || exit $?
echo "========= make javahl"
make javahl -j1 || exit $?
echo "========= make swig-py"
make swig-py || exit $?
echo "========= make swig-pl"
make swig-pl -j1 || exit $?
echo "========= make swig-rb"
make swig-rb -j1 || exit $?
exit 0
|
centic9/subversion-ppa
|
tools/buildbot/slaves/centos/svnbuild.sh
|
Shell
|
apache-2.0
| 1,848 |
#!/bin/bash
BASE_DIR=`dirname $0`
echo ""
echo "Starting Karma Server (http://karma-runner.github.io/)"
echo "------------------------------------------------------"
karma start $BASE_DIR/../config/karma.conf.js $*
|
jackpunt/playground
|
scripts/test.sh
|
Shell
|
apache-2.0
| 218 |
#!/usr/bin/env bash
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
TOP_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd -P)"
source "${TOP_DIR}/scripts/apollo_base.sh"
${TOP_DIR}/bazel-bin/modules/tools/plot_trace/plot_trace "$@"
|
jinghaomiao/apollo
|
scripts/plot_trace.sh
|
Shell
|
apache-2.0
| 950 |
#!/bin/bash
# Author: Matt Mastracci ([email protected])
# AppleScript from http://stackoverflow.com/questions/4309087/cancel-button-on-osascript-in-a-bash-script
# licensed under cc-wiki with attribution required
# Remainder of script public domain
osascript -e 'tell application "iTerm2" to version' > /dev/null 2>&1 && NAME=iTerm2 || NAME=iTerm
if [[ $NAME = "iTerm" ]]; then
FILE=`osascript -e 'tell application "iTerm" to activate' -e 'tell application "iTerm" to set thefile to choose folder with prompt "Choose a folder to place received files in"' -e "do shell script (\"echo \"&(quoted form of POSIX path of thefile as Unicode text)&\"\")"`
else
FILE=`osascript -e 'tell application "iTerm2" to activate' -e 'tell application "iTerm2" to set thefile to choose folder with prompt "Choose a folder to place received files in"' -e "do shell script (\"echo \"&(quoted form of POSIX path of thefile as Unicode text)&\"\")"`
fi
if [[ $FILE = "" ]]; then
echo Cancelled.
# Send ZModem cancel
echo -e \\x18\\x18\\x18\\x18\\x18
sleep 1
echo
echo \# Cancelled transfer
else
cd "$FILE"
/usr/local/bin/rz -E -e -b
sleep 1
echo
echo
echo \# Sent \-\> $FILE
fi
|
bestswifter/macbootstrap
|
tools/iterm2-recv-zmodem.sh
|
Shell
|
apache-2.0
| 1,177 |
#!/bin/bash
# Copyright 2009-2012 Microsoft Corporation Johns Hopkins University (Author: Daniel Povey)
# Apache 2.0.
if [ $# -le 3 ]; then
echo "Arguments should be a list of WSJ directories, see ../run.sh for example."
exit 1;
fi
#dir=`pwd`/data/local/data
#lmdir=`pwd`/data/local/nist_lm
#mkdir -p $dir $lmdir
dir=${*: -1} # the last argument is the data directory where we want to create the scp files
mkdir -p $dir
wsj_local=../../wsj/s5/local
local=`pwd`/local
utils=`pwd`/utils
curr_dir=`pwd`;
# delete the last argument from the positional param array. ($# is the index of the last arg)
nargs=$#; set -- "${@:1: $nargs - 1}"
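# e.g. a hypothetical call "local/make_wsj.sh /corpora/wsj0 /corpora/wsj1 data/local/data"
# leaves "$@" = "/corpora/wsj0 /corpora/wsj1" while dir=data/local/data (set above from ${*: -1})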
. ./path.sh # Needed for KALDI_ROOT
export PATH=$PATH:$KALDI_ROOT/tools/irstlm/bin
sph2pipe=$KALDI_ROOT/tools/sph2pipe_v2.5/sph2pipe
if [ ! -x $sph2pipe ]; then
echo "Could not find (or execute) the sph2pipe program at $sph2pipe";
exit 1;
fi
[ -f $wsj_local/ndx2flist.pl ] || { echo "$wsj_local/ndx2flist.pl does not exist"; exit 1; }
cp $wsj_local/ndx2flist.pl $local
cd $dir
# Make directory of links to the WSJ disks such as 11-13.1. This relies on the command
# line arguments being absolute pathnames.
rm -r links/ 2>/dev/null
mkdir links/
ln -s $* links
# Do some basic checks that we have what we expected.
if [ ! -d links/11-13.1 -o ! -d links/13-34.1 -o ! -d links/11-2.1 ]; then
echo "wsj_data_prep.sh: Spot check of command line arguments failed"
echo "Command line arguments must be absolute pathnames to WSJ directories"
echo "with names like 11-13.1."
exit 1;
fi
# This version for SI-84
cat links/11-13.1/wsj0/doc/indices/train/tr_s_wv1.ndx | \
$local/ndx2flist.pl $* | sort | \
grep -v -i 11-2.1/wsj0/si_tr_s/401 > train_si84.flist
nl=`cat train_si84.flist | wc -l`
[ "$nl" -eq 7138 ] || echo "Warning: expected 7138 lines in train_si84.flist, got $nl"
## This version for SI-284
#cat links/13-34.1/wsj1/doc/indices/si_tr_s.ndx \
#links/11-13.1/wsj0/doc/indices/train/tr_s_wv1.ndx | \
#$local/ndx2flist.pl $* | sort | \
#grep -v -i 11-2.1/wsj0/si_tr_s/401 > train_si284.flist
#nl=`cat train_si284.flist | wc -l`
#[ "$nl" -eq 37416 ] || echo "Warning: expected 37416 lines in train_si284.flist, got $nl"
cd $curr_dir
# Make the wav.scp, utt2spk, spk2utt, spk2gender files.
$local/make_wsj.pl $dir/train_si84.flist $KALDI_ROOT/tools/sph2pipe_v2.5/sph2pipe $dir
# Make the wav.scp, utt2spk and spk2utt files.
# for x in train_si84 train_si284; do
# $local/flist2scp.pl $x.flist | sort > ${x}_sph.scp
# awk '{printf("%s '$sph2pipe' -f wav %s |\n", $1, $2);}' < ${x}_sph.scp > ${x}_wav.scp
# cat ${x}_sph.scp | awk '{print $1}' | perl -ane 'chop; m:^...:; print "$_ $&\n";' > $x.utt2spk
# cat $x.utt2spk | $utils/utt2spk_to_spk2utt.pl > $x.spk2utt || exit 1;
#done
false && {
# Now for the test sets.
# links/13-34.1/wsj1/doc/indices/readme.doc
# describes all the different test sets.
# Note: each test-set seems to come in multiple versions depending
# on different vocabulary sizes, verbalized vs. non-verbalized
# pronunciations, etc. We use the largest vocab and non-verbalized
# pronunciations.
# The most normal one seems to be the "baseline 60k test set", which
# is h1_p0.
# Nov'92 (333 utts)
# These index files have a slightly different format;
# have to add .wv1
cat links/11-13.1/wsj0/doc/indices/test/nvp/si_et_20.ndx | \
$local/ndx2flist.pl $* | awk '{printf("%s.wv1\n", $1)}' | \
sort > test_eval92.flist
# Nov'92 (330 utts, 5k vocab)
cat links/11-13.1/wsj0/doc/indices/test/nvp/si_et_05.ndx | \
$local/ndx2flist.pl $* | awk '{printf("%s.wv1\n", $1)}' | \
sort > test_eval92_5k.flist
# Nov'93: (213 utts)
# Have to replace a wrong disk-id.
cat links/13-32.1/wsj1/doc/indices/wsj1/eval/h1_p0.ndx | \
sed s/13_32_1/13_33_1/ | \
$local/ndx2flist.pl $* | sort > test_eval93.flist
# Nov'93: (213 utts, 5k)
cat links/13-32.1/wsj1/doc/indices/wsj1/eval/h2_p0.ndx | \
sed s/13_32_1/13_33_1/ | \
$local/ndx2flist.pl $* | sort > test_eval93_5k.flist
# Dev-set for Nov'93 (503 utts)
cat links/13-34.1/wsj1/doc/indices/h1_p0.ndx | \
$local/ndx2flist.pl $* | sort > test_dev93.flist
# Dev-set for Nov'93 (513 utts, 5k vocab)
cat links/13-34.1/wsj1/doc/indices/h2_p0.ndx | \
$local/ndx2flist.pl $* | sort > test_dev93_5k.flist
# Dev-set Hub 1,2 (503, 913 utterances)
# Note: the ???'s below match WSJ and SI_DT, or wsj and si_dt.
# Sometimes this gets copied from the CD's with upcasing, don't know
# why (could be older versions of the disks).
find `readlink links/13-16.1`/???1/??_??_20 -print | grep -i ".wv1" | sort > dev_dt_20.flist
find `readlink links/13-16.1`/???1/??_??_05 -print | grep -i ".wv1" | sort > dev_dt_05.flist
# Finding the transcript files:
for x in $*; do find -L $x -iname '*.dot'; done > dot_files.flist
# Convert the transcripts into our format (no normalization yet)
for x in train_si84 train_si284 test_eval92 test_eval93 test_dev93 test_eval92_5k test_eval93_5k test_dev93_5k dev_dt_05 dev_dt_20; do
$local/flist2scp.pl $x.flist | sort > ${x}_sph.scp
cat ${x}_sph.scp | awk '{print $1}' | $local/find_transcripts.pl dot_files.flist > $x.trans1
done
# Do some basic normalization steps. At this point we don't remove OOVs--
# that will be done inside the training scripts, as we'd like to make the
# data-preparation stage independent of the specific lexicon used.
noiseword="<NOISE>";
for x in train_si84 train_si284 test_eval92 test_eval93 test_dev93 test_eval92_5k test_eval93_5k test_dev93_5k dev_dt_05 dev_dt_20; do
cat $x.trans1 | $local/normalize_transcript.pl $noiseword | sort > $x.txt || exit 1;
done
# Create scp's with wav's. (the wv1 in the distribution is not really wav, it is sph.)
for x in train_si84 train_si284 test_eval92 test_eval93 test_dev93 test_eval92_5k test_eval93_5k test_dev93_5k dev_dt_05 dev_dt_20; do
awk '{printf("%s '$sph2pipe' -f wav %s |\n", $1, $2);}' < ${x}_sph.scp > ${x}_wav.scp
done
# Make the utt2spk and spk2utt files.
for x in train_si84 train_si284 test_eval92 test_eval93 test_dev93 test_eval92_5k test_eval93_5k test_dev93_5k dev_dt_05 dev_dt_20; do
cat ${x}_sph.scp | awk '{print $1}' | perl -ane 'chop; m:^...:; print "$_ $&\n";' > $x.utt2spk
cat $x.utt2spk | $utils/utt2spk_to_spk2utt.pl > $x.spk2utt || exit 1;
done
#in case we want to limit lm's on most frequent words, copy lm training word frequency list
cp links/13-32.1/wsj1/doc/lng_modl/vocab/wfl_64.lst $lmdir
chmod u+w $lmdir/*.lst # had weird permissions on source.
# The 20K vocab, open-vocabulary language model (i.e. the one with UNK), without
# verbalized pronunciations. This is the most common test setup, I understand.
cp links/13-32.1/wsj1/doc/lng_modl/base_lm/bcb20onp.z $lmdir/lm_bg.arpa.gz || exit 1;
chmod u+w $lmdir/lm_bg.arpa.gz
# trigram would be:
cat links/13-32.1/wsj1/doc/lng_modl/base_lm/tcb20onp.z | \
perl -e 'while(<>){ if(m/^\\data\\/){ print; last; } } while(<>){ print; }' | \
gzip -c -f > $lmdir/lm_tg.arpa.gz || exit 1;
prune-lm --threshold=1e-7 $lmdir/lm_tg.arpa.gz $lmdir/lm_tgpr.arpa || exit 1;
gzip -f $lmdir/lm_tgpr.arpa || exit 1;
# repeat for 5k language models
cp links/13-32.1/wsj1/doc/lng_modl/base_lm/bcb05onp.z $lmdir/lm_bg_5k.arpa.gz || exit 1;
chmod u+w $lmdir/lm_bg_5k.arpa.gz
# trigram would be: !only closed vocabulary here!
cp links/13-32.1/wsj1/doc/lng_modl/base_lm/tcb05cnp.z $lmdir/lm_tg_5k.arpa.gz || exit 1;
chmod u+w $lmdir/lm_tg_5k.arpa.gz
gunzip $lmdir/lm_tg_5k.arpa.gz
tail -n 4328839 $lmdir/lm_tg_5k.arpa | gzip -c -f > $lmdir/lm_tg_5k.arpa.gz
rm $lmdir/lm_tg_5k.arpa
prune-lm --threshold=1e-7 $lmdir/lm_tg_5k.arpa.gz $lmdir/lm_tgpr_5k.arpa || exit 1;
gzip -f $lmdir/lm_tgpr_5k.arpa || exit 1;
if [ ! -f wsj0-train-spkrinfo.txt ] || [ `cat wsj0-train-spkrinfo.txt | wc -l` -ne 134 ]; then
rm wsj0-train-spkrinfo.txt
! wget http://www.ldc.upenn.edu/Catalog/docs/LDC93S6A/wsj0-train-spkrinfo.txt && \
echo "Getting wsj0-train-spkrinfo.txt from backup location" && \
wget --no-check-certificate https://sourceforge.net/projects/kaldi/files/wsj0-train-spkrinfo.txt
fi
if [ ! -f wsj0-train-spkrinfo.txt ]; then
echo "Could not get the spkrinfo.txt file from LDC website (moved)?"
echo "This is possibly omitted from the training disks; couldn't find it."
echo "Everything else may have worked; we just may be missing gender info"
echo "which is only needed for VTLN-related diagnostics anyway."
exit 1
fi
# Note: wsj0-train-spkrinfo.txt doesn't seem to be on the disks but the
# LDC put it on the web. Perhaps it was accidentally omitted from the
# disks.
cat links/11-13.1/wsj0/doc/spkrinfo.txt \
links/13-32.1/wsj1/doc/evl_spok/spkrinfo.txt \
links/13-34.1/wsj1/doc/dev_spok/spkrinfo.txt \
links/13-34.1/wsj1/doc/train/spkrinfo.txt \
./wsj0-train-spkrinfo.txt | \
perl -ane 'tr/A-Z/a-z/; m/^;/ || print;' | \
awk '{print $1, $2}' | grep -v -- -- | sort | uniq > spk2gender
echo "Data preparation succeeded"
}
echo "wsj data prep succeeded"
|
irrawaddy28/multilingualdbn
|
local/make_wsj.sh
|
Shell
|
apache-2.0
| 8,968 |
#!/bin/bash
awk '
BEGIN {
# define devices to exclude
excluded_devices[0] = "^ram"
excluded_devices[1] = "^loop"
}
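# foo(): collect into `result` the component devices that /proc/mdstat maps to the given md device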
function foo(device, mdmap, result) {
for (ij in mdmap) {
split(ij, xx, SUBSEP)
if (xx[1] == device) {
result[xx[2]] = 1
}
}
}
{
if (NR == FNR) {
FS = ":"
$0=$0
if (match($1, "md[0-9]+")) {
mddevice = $1
gsub(/ /, "", mddevice)
gsub(/\[[0-9]+\]/, "", $2)
split($2, devices, " ")
dev_status[mddevice] = devices[1]
for (i in devices) {
# first two fields are status, type
if (i > 2) {
mdmap[mddevice,devices[i]] = 1
}
}
}
} else {
FS = " "
$0=$0
# exclude devices
for (i in excluded_devices) {
if ($3 ~ excluded_devices[i]) {
next
}
}
rd_ms[$3] = $7
wr_ms[$3] = $11
io_in_progress[$3] = $12
io_ms[$3] = $13
if ($3 ~ /md/) {
# initialize array
split("", devices)
foo($3, mdmap, devices)
for (d in devices) {
$7+= rd_ms[d]
$11 += wr_ms[d]
$12 += io_in_progress[d]
$13 += io_ms[d]
}
}
print $3"`rd_completed\tL\t"$4
print $3"`rd_merged\tL\t"$5
print $3"`rd_sectors\tL\t"$6
print $3"`rd_ms\tL\t"$7
print $3"`wr_completed\tL\t"$8
print $3"`wr_merged\tL\t"$9
print $3"`wr_sectors\tL\t"$10
print $3"`wr_ms\tL\t"$11
print $3"`io_in_progress\tL\t"$12
print $3"`io_ms\tL\t"$13
print $3"`io_ms_weighted\tL\t"$14
}
}
' /proc/mdstat /proc/diskstats
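# Each emitted line has the form <device>`<metric><TAB>L<TAB><value> (e.g. a hypothetical
# "sda`rd_completed L 123456"); for md devices the rd_ms/wr_ms/io_ms and io_in_progress values
# are summed over their component devices first.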
|
maier/nad
|
plugins/linux/diskstats.sh
|
Shell
|
bsd-3-clause
| 1,846 |
#!/bin/sh
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Tests emerging all the ebuilds that use vboot_reference either as an
# ebuild dependency or by checking out the code and compiling it in a
# different ebuild. This is meant to be run from the chroot as part of testing
# a new change in vboot_reference.
# Required ebuilds:
TEST_EBUILDS="
sys-boot/chromeos-bootimage
sys-boot/chromeos-u-boot
sys-boot/coreboot
sys-boot/depthcharge
chromeos-base/chromeos-cryptohome
chromeos-base/chromeos-ec
chromeos-base/chromeos-installer
chromeos-base/chromeos-initramfs
chromeos-base/chromeos-login
chromeos-base/update_engine
chromeos-base/vboot_reference
chromeos-base/verity
"
set -e
# Check running inside the chroot.
if [ ! -e /etc/cros_chroot_version ]; then
echo "You must run this inside the chroot." >&2
exit 1
fi
# Detect the target board.
if [ "x${BOARD}" = "x" ]; then
if [ -e ~/trunk/src/scripts/.default_board ]; then
BOARD="`cat ~/trunk/src/scripts/.default_board`"
else
echo "You must pass BOARD environment variable or set a default board." >&2
exit 1
fi
fi
VBOOT_REF_DIR="$(dirname "$0")"
echo "Running tests for board '${BOARD}' from ${VBOOT_REF_DIR}"
cd "${VBOOT_REF_DIR}"
echo "Running make runtests..."
make runtests -j32
echo "Removing build artifacts."
rm -rf build build-main
echo "Running emerge tests (runs cros_workon start)."
# Ignore errors about already working on those repos.
cros_workon-${BOARD} start ${TEST_EBUILDS} || true
emerge-${BOARD} ${TEST_EBUILDS}
|
coreboot/vboot
|
emerge_test.sh
|
Shell
|
bsd-3-clause
| 1,663 |
#!/usr/bin/env bash
# Run the pipeline (ReadsPipelineSpark) on small data in HDFS.
. utils.sh
time_gatk "ReadsPipelineSpark -I hdfs:///user/$USER/small_spark_eval/CEUTrio.HiSeq.WGS.b37.NA12878.20.21.bam -O hdfs://${HDFS_HOST_PORT}/user/$USER/small_spark_eval/out/CEUTrio.HiSeq.WGS.b37.NA12878.20.21.vcf -R hdfs:///user/$USER/small_spark_eval/human_g1k_v37.20.21.2bit --known-sites hdfs://${HDFS_HOST_PORT}/user/$USER/small_spark_eval/dbsnp_138.b37.20.21.vcf -pairHMM AVX_LOGLESS_CACHING --max-reads-per-alignment-start 10" 1 8 32g 4g
|
magicDGS/gatk
|
scripts/spark_eval/small_reads-pipeline_hdfs.sh
|
Shell
|
bsd-3-clause
| 537 |
#!/bin/bash
# from https://github.com/ekmett/lens/blob/master/scripts/hackage-docs.sh
# Copyright 2012-2015 Edward Kmett, BSD3 license
set -e
if [ "$#" -ne 1 ]; then
echo "Usage: scripts/hackage-docs.sh HACKAGE_USER"
exit 1
fi
user=$1
cabal_file=$(find . -maxdepth 1 -name "*.cabal" -print -quit)
if [ ! -f "$cabal_file" ]; then
echo "Run this script in the top-level package directory"
exit 1
fi
pkg=$(awk -F ":[[:space:]]*" 'tolower($1)=="name" { print $2 }' < "$cabal_file")
ver=$(awk -F ":[[:space:]]*" 'tolower($1)=="version" { print $2 }' < "$cabal_file")
if [ -z "$pkg" ]; then
echo "Unable to determine package name"
exit 1
fi
if [ -z "$ver" ]; then
echo "Unable to determine package version"
exit 1
fi
echo "Detected package: $pkg-$ver"
dir=$(mktemp -d build-docs.XXXXXX)
trap 'rm -r "$dir"' EXIT
if haddock --hyperlinked-source >/dev/null
then
echo "Using fancy hyperlinked source"
HYPERLINK_FLAG="--haddock-option=--hyperlinked-source"
else
echo "Using boring hyperlinked source"
HYPERLINK_FLAG="--hyperlink-source"
fi
cabal haddock --hoogle $HYPERLINK_FLAG --html-location='/package/$pkg-$version/docs' --contents-location='/package/$pkg-$version'
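# Note: the single quotes around the --html-location/--contents-location templates are deliberate;
# cabal itself substitutes $pkg and $version when it expands these path templates.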
cp -R dist/doc/html/$pkg/ $dir/$pkg-$ver-docs
tar cvz -C $dir --format=ustar -f $dir/$pkg-$ver-docs.tar.gz $pkg-$ver-docs
curl -X PUT \
-H 'Content-Type: application/x-tar' \
-H 'Content-Encoding: gzip' \
-u "$user" \
--data-binary "@$dir/$pkg-$ver-docs.tar.gz" \
"https://hackage.haskell.org/package/$pkg-$ver/docs"
|
jkr/pandoc-citeproc
|
hackage-docs.sh
|
Shell
|
bsd-3-clause
| 1,545 |
#!/bin/sh
/usr/VICE/bin/x64
|
AreaScout/vice-gles2
|
src/arch/sdl/syllable-files/x64.sh
|
Shell
|
gpl-2.0
| 29 |
# Boeffla-Config controller interface
#
# Version: GPU 4 frequencies
#
# (C) andip71
# ********************************
# Kernel specific initialisation
# ********************************
# kernel specification (hardware; type; target; url)
KERNEL_SPECS="i9300;samsung;jb41;http://boeffla.df-kunde.de/sgs3/boeffla-kernel/"
# path to kernel libraries
LIBPATH="/lib/modules" # Samsung
#LIBPATH="/system/lib/modules" # Cyanogenmod+Omni
# *******************
# List of values
# *******************
if [ "lov_gov_profiles" == "$1" ]; then
echo "pegasusq - boeffla moderate;pegasusq - boeffla battery saving;pegasusq - boeffla 1 core;pegasusq - boeffla 2 cores;pegasusq - speedmod;zzmoove - optimal;zzmoove - battery;zzmoove - battery yank;zzmoove - battery extreme yank;zzmoove - performance;pegasusqplus - balanced;pegasusqplus - battery"
exit 0
fi
if [ "lov_cpu_volt_profiles" == "$1" ]; then
echo "undervolt -25mV;undervolt -50mV;undervolt -75mV;undervolt -100mV;undervolt light;undervolt medium;undervolt heavy"
exit 0
fi
if [ "lov_gpu_freq_profiles" == "$1" ]; then
echo "54 only;160 only;160/266;266/350;54/108/160/266;108/160/266/350;160/266/350/440 (default);266/350/440/533;350/440/533/600;440/533/600/700"
exit 0
fi
if [ "lov_gpu_volt_profiles" == "$1" ]; then
echo "undervolt -25mV;undervolt -50mV;undervolt -75mV;undervolt -100mV;undervolt light;undervolt medium;undervolt heavy;overvolt +25mV;overvolt +50mV;overvolt +75mV;overvolt +100mV"
exit 0
fi
if [ "lov_gpu_freq" == "$1" ]; then
echo "54;108;160;266;350;440;533;600;700"
exit 0
fi
if [ "lov_eq_gain_profiles" == "$1" ]; then
echo "Archis audiophile;Baseland;Bass extreme;Bass treble;Classic;Dance;Eargasm;Metal/Rock;Pleasant;Treble"
exit 0
fi
if [ "lov_system_tweaks" == "$1" ]; then
echo "Off;Boeffla tweaks;Speedmod tweaks;Mattiadj tweaks"
exit 0
fi
if [ "lov_modules" == "$1" ]; then
ls $LIBPATH/*
exit 0
fi
if [ "lov_presets" == "$1" ]; then
# Note, the ^ sign will be translated into newline for this setting
echo "Power extreme~"
echo "Gov: lulzactiveq / no profile"
echo "^Sched: row / row"
echo "^CPU: 1600 / no uv"
echo "^GPU: 440-700 / +50mV;"
echo "Power~"
echo "Gov: zzmoove / zzmoove-performance"
echo "^Sched: row / row"
echo "^CPU: 1500 / no uv"
echo "^GPU: 266-533 / no uv;"
echo "Standard~"
echo "Gov: pegasusq / no profile"
echo "^Sched: cfq / cfq"
echo "^CPU: 1400 / no uv"
echo "^GPU: 160-440 / no uv;"
echo "Battery friendly~"
echo "Gov: pegasusq / boeffla-moderate"
echo "^Sched: cfq / cfq"
echo "^CPU: 1400 / -25mV"
echo "^GPU: 160/266 / -25mV;"
echo "Battery saving~"
echo "Gov: zzmoove / zzmoove-battery"
echo "^Sched: cfq / cfq"
echo "^CPU: 1000 / light uv"
echo "^GPU: 160/266 / light uv;"
exit 0
fi
# ************************************
# Configuration values (for profiles)
# ************************************
if [ "conf_presets" == "$1" ]; then
if [ "Power extreme" == "$2" ]; then
# gov, gov prof, sched int, sched ext, cpu max, cpu uv, gpu freq, gpu uv
echo "lulzactiveq;None;"
echo "row;row;"
echo "1600000;None;"
echo "440/533/600/700;overvolt +50mV"
fi
if [ "Power" == "$2" ]; then
# gov, gov prof, sched int, sched ext, cpu max, cpu uv, gpu freq, gpu uv
echo "zzmoove;zzmoove - performance;"
echo "row;row;"
echo "1500000;None;"
echo "266/350/440/533;None"
fi
if [ "Standard" == "$2" ]; then
# gov, gov prof, sched int, sched ext, cpu max, cpu uv, gpu freq, gpu uv
echo "pegasusq;None;"
echo "cfq;cfq;"
echo "1400000;None;"
echo "None;None"
fi
if [ "Battery friendly" == "$2" ]; then
# gov, gov prof, sched int, sched ext, cpu max, cpu uv, gpu freq, gpu uv
echo "pegasusq;pegasusq - boeffla moderate;"
echo "cfq;cfq;"
echo "1400000;undervolt -25mV;"
echo "160/266;undervolt -25mV"
fi
if [ "Battery saving" == "$2" ]; then
# gov, gov prof, sched int, sched ext, cpu max, cpu uv, gpu freq, gpu uv
echo "zzmoove;zzmoove - battery;"
echo "cfq;cfq;"
echo "1000000;undervolt light;"
echo "160/266;undervolt light"
fi
exit 0
fi
if [ "conf_gpu_freq" == "$1" ]; then
if [ "54 only" == "$2" ]; then
echo "54;54;54;54"
fi
if [ "160 only" == "$2" ]; then
echo "160;160;160;160"
fi
if [ "160/266" == "$2" ]; then
echo "160;160;266;266"
fi
if [ "266/350" == "$2" ]; then
echo "266;266;350;350"
fi
if [ "54/108/160/266" == "$2" ]; then
echo "54;108;160;266"
fi
if [ "108/160/266/350" == "$2" ]; then
echo "108;160;266;350"
fi
if [ "160/266/350/440 (default)" == "$2" ]; then
echo "160;266;350;440"
fi
if [ "266/350/440/533" == "$2" ]; then
echo "266;350;440;533"
fi
if [ "350/440/533/600" == "$2" ]; then
echo "350;440;533;600"
fi
if [ "440/533/600/700" == "$2" ]; then
echo "440;533;600;700"
fi
exit 0
fi
if [ "conf_gpu_volt" == "$1" ]; then
if [ "undervolt -25mV" == "$2" ]; then
echo "-25000;-25000;-25000;-25000"
fi
if [ "undervolt -50mV" == "$2" ]; then
echo "-50000;-50000;-50000;-50000"
fi
if [ "undervolt -75mV" == "$2" ]; then
echo "-75000;-75000;-75000;-75000"
fi
if [ "undervolt -100mV" == "$2" ]; then
echo "-100000;-100000;-100000;-100000"
fi
if [ "undervolt light" == "$2" ]; then
echo "-25000;-25000;-50000;-50000"
fi
if [ "undervolt medium" == "$2" ]; then
echo "-50000;-50000;-75000;-75000"
fi
if [ "undervolt heavy" == "$2" ]; then
echo "-75000;-75000;-100000;-100000"
fi
if [ "overvolt +25mV" == "$2" ]; then
echo "25000;25000;25000;25000"
fi
if [ "overvolt +50mV" == "$2" ]; then
echo "50000;50000;50000;50000"
fi
if [ "overvolt +75mV" == "$2" ]; then
echo "75000;75000;75000;75000"
fi
if [ "overvolt +100mV" == "$2" ]; then
echo "100000;100000;100000;100000"
fi
exit 0
fi
if [ "conf_cpu_volt" == "$1" ]; then
if [ "undervolt -25mV" == "$2" ]; then
echo "-25;-25;-25;-25;-25;-25;-25;-25;-25;-25;-25;-25;-25;-25;-25"
fi
if [ "undervolt -50mV" == "$2" ]; then
echo "-50;-50;-50;-50;-50;-50;-50;-50;-50;-50;-50;-50;-50;-50;-50"
fi
if [ "undervolt -75mV" == "$2" ]; then
echo "-75;-75;-75;-75;-75;-75;-75;-75;-75;-75;-75;-75;-75;-75;-75"
fi
if [ "undervolt -100mV" == "$2" ]; then
echo "-100;-100;-100;-100;-100;-100;-100;-100;-100;-100;-100;-100;-100;-100;-100"
fi
if [ "undervolt light" == "$2" ]; then
echo "0;0;0;0;0;0;-25;-25;-25;-25;-25;-50;-50;-50;-50"
fi
if [ "undervolt medium" == "$2" ]; then
echo "-25;-25;-25;-25;-25;-25;-50;-50;-50;-50;-50;-75;-75;-75;-75"
fi
if [ "undervolt heavy" == "$2" ]; then
echo "-50;-50;-50;-50;-50;-50;-75;-75;-75;-75;-75;-100;-100;-100;-100"
fi
exit 0
fi
if [ "conf_eq_gains" == "$1" ]; then
if [ "Archis audiophile" == "$2" ]; then
echo "8;4;4;2;6"
fi
if [ "Eargasm" == "$2" ]; then
echo "12;8;4;2;3"
fi
if [ "Pleasant" == "$2" ]; then
echo "4;3;2;2;3"
fi
if [ "Classic" == "$2" ]; then
echo "0;0;0;-3;-5"
fi
if [ "Bass treble" == "$2" ]; then
echo "10;7;0;2;5"
fi
if [ "Bass extreme" == "$2" ]; then
echo "12;8;3;-1;1"
fi
if [ "Treble" == "$2" ]; then
echo "-5;1;0;4;3"
fi
if [ "Baseland" == "$2" ]; then
echo "8;7;4;3;3"
fi
if [ "Dance" == "$2" ]; then
echo "4;0;-6;0;3"
fi
if [ "Metal/Rock" == "$2" ]; then
echo "4;3;0;-4;3"
fi
exit 0
fi
# *******************
# Parameters
# *******************
if [ "param_readahead" == "$1" ]; then
# Internal sd (min/max/steps)
echo "128;3072;128;"
# External sd (min/max/steps)
echo "128;3072;128"
exit 0
fi
if [ "param_boeffla_sound" == "$1" ]; then
# Headphone min/max, Speaker min/max
echo "20;63;57;63;"
# Equalizer min/max
echo "-12;12;"
# Microphone gain min/max
echo "0;31;"
# Stereo expansion min/max
echo "0;31"
exit 0
fi
if [ "param_cpu_uv" == "$1" ]; then
# CPU UV min/max/steps
echo "600;1500;25"
exit 0
fi
if [ "param_gpu_uv" == "$1" ]; then
# GPU UV min/max/steps
echo "600000;1200000;25000"
exit 0
fi
if [ "param_led" == "$1" ]; then
# LED speed min/max/steps
echo "1;5;1;"
# LED brightness min/max/steps
echo "5;130;5"
exit 0
fi
if [ "param_touchwake" == "$1" ]; then
# Touchwake min/max/steps
echo "0;600000;5000"
exit 0
fi
if [ "param_early_suspend_delay" == "$1" ]; then
# Early suspend delay min/max/steps
echo "0;700;25"
exit 0
fi
if [ "param_zram" == "$1" ]; then
# zRam size min/max/steps
echo "104857600;838860800;20971520"
exit 0
fi
if [ "param_charge_rates" == "$1" ]; then
# AC charge min/max/steps
echo "100;1600;25;"
# USB charge min/max/steps
echo "0;1600;25;"
# Wireless charge min/max/steps
echo "100;1000;25"
exit 0
fi
if [ "param_lmk" == "$1" ]; then
# LMK size min/max/steps
echo "5;300;1"
exit 0
fi
# *******************
# Get settings
# *******************
if [ "get_ums" == "$1" ]; then
if [ "`busybox grep 179 /sys/devices/platform/s3c-usbgadget/gadget/lun0/file`" ]; then
echo "1"
else
echo "0"
fi
exit 0
fi
if [ "get_tunables" == "$1" ]; then
if [ -d /sys/devices/system/cpu/cpufreq/$2 ]; then
cd /sys/devices/system/cpu/cpufreq/$2
for file in *
do
content="`busybox cat $file`"
busybox echo -ne "$file~$content;"
done
fi
fi
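# get_tunables output is a single line of "tunable~value;" pairs for the requested governor,
# e.g. (hypothetical) "sampling_rate~30000;up_threshold~85;..."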
if [ "get_kernel_version2" == "$1" ]; then
busybox cat /proc/version
exit 0
fi
if [ "get_kernel_specs" == "$1" ]; then
echo $KERNEL_SPECS
exit 0
fi
# *******************
# Applying settings
# *******************
if [ "apply_governor_profile" == "$1" ]; then
if [ "pegasusq - standard" == "$2" ]; then
# cpu2
echo "500000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_1_1
echo "200000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_2_0
echo "100" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_1_1
echo "100" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_2_0
# cpu3
echo "500000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_2_1
echo "200000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_3_0
echo "200" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_2_1
echo "200" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_3_0
# cpu4
echo "500000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_3_1
echo "200000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_4_0
echo "300" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_3_1
echo "300" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_4_0
echo "20" > /sys/devices/system/cpu/cpufreq/pegasusq/cpu_down_rate
echo "10" > /sys/devices/system/cpu/cpufreq/pegasusq/cpu_up_rate
echo "85" > /sys/devices/system/cpu/cpufreq/pegasusq/up_threshold
echo "37" > /sys/devices/system/cpu/cpufreq/pegasusq/freq_step
fi
if [ "pegasusq - boeffla 1 core" == "$2" ]; then
# cpu2
echo "1400000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_1_1
echo "1300000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_2_0
echo "3000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_1_1
echo "3000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_2_0
# cpu3
echo "1400000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_2_1
echo "1300000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_3_0
echo "4000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_2_1
echo "4000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_3_0
# cpu4
echo "1400000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_3_1
echo "1300000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_4_0
echo "5000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_3_1
echo "5000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_4_0
echo "20" > /sys/devices/system/cpu/cpufreq/pegasusq/cpu_down_rate
echo "10" > /sys/devices/system/cpu/cpufreq/pegasusq/cpu_up_rate
echo "85" > /sys/devices/system/cpu/cpufreq/pegasusq/up_threshold
echo "37" > /sys/devices/system/cpu/cpufreq/pegasusq/freq_step
fi
if [ "pegasusq - boeffla 2 cores" == "$2" ]; then
# cpu2
echo "500000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_1_1
echo "200000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_2_0
echo "100" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_1_1
echo "100" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_2_0
# cpu3
echo "1400000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_2_1
echo "1300000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_3_0
echo "4000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_2_1
echo "4000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_3_0
# cpu4
echo "1400000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_3_1
echo "1300000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_4_0
echo "5000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_3_1
echo "5000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_4_0
echo "20" > /sys/devices/system/cpu/cpufreq/pegasusq/cpu_down_rate
echo "10" > /sys/devices/system/cpu/cpufreq/pegasusq/cpu_up_rate
echo "85" > /sys/devices/system/cpu/cpufreq/pegasusq/up_threshold
echo "37" > /sys/devices/system/cpu/cpufreq/pegasusq/freq_step
fi
if [ "pegasusq - speedmod" == "$2" ]; then
# cpu2
echo "500000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_1_1
echo "400000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_2_0
echo "100" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_1_1
echo "100" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_2_0
# cpu3
echo "800000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_2_1
echo "600000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_3_0
echo "200" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_2_1
echo "200" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_3_0
# cpu4
echo "800000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_3_1
echo "600000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_4_0
echo "300" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_3_1
echo "300" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_4_0
echo "10" > /sys/devices/system/cpu/cpufreq/pegasusq/cpu_down_rate
echo "10" > /sys/devices/system/cpu/cpufreq/pegasusq/cpu_up_rate
echo "85" > /sys/devices/system/cpu/cpufreq/pegasusq/up_threshold
echo "37" > /sys/devices/system/cpu/cpufreq/pegasusq/freq_step
fi
if [ "pegasusq - boeffla battery saving" == "$2" ]; then
# cpu2
echo "1400000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_1_1
echo "1300000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_2_0
echo "500" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_1_1
echo "500" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_2_0
# cpu3
echo "1400000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_2_1
echo "1300000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_3_0
echo "550" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_2_1
echo "550" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_3_0
# cpu4
echo "1400000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_3_1
echo "1300000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_4_0
echo "600" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_3_1
echo "600" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_4_0
echo "10" > /sys/devices/system/cpu/cpufreq/pegasusq/cpu_down_rate
echo "5" > /sys/devices/system/cpu/cpufreq/pegasusq/cpu_up_rate
echo "95" > /sys/devices/system/cpu/cpufreq/pegasusq/up_threshold
echo "25" > /sys/devices/system/cpu/cpufreq/pegasusq/freq_step
fi
if [ "pegasusq - boeffla moderate" == "$2" ]; then
# cpu2
echo "800000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_1_1
echo "700000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_2_0
echo "100" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_1_1
echo "100" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_2_0
# cpu3
echo "1100000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_2_1
echo "1000000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_3_0
echo "300" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_2_1
echo "300" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_3_0
# cpu4
echo "1300000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_3_1
echo "1200000" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_4_0
echo "400" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_3_1
echo "400" > /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_4_0
echo "10" > /sys/devices/system/cpu/cpufreq/pegasusq/cpu_down_rate
echo "10" > /sys/devices/system/cpu/cpufreq/pegasusq/cpu_up_rate
echo "85" > /sys/devices/system/cpu/cpufreq/pegasusq/up_threshold
echo "37" > /sys/devices/system/cpu/cpufreq/pegasusq/freq_step
fi
if [ "pegasusqplus - standard" == "$2" ]; then
echo "30" > /sys/devices/system/cpu/cpufreq/pegasusqplus/cpu_down_rate
echo "2" > /sys/devices/system/cpu/cpufreq/pegasusqplus/cpu_online_bias_count
echo "30" > /sys/devices/system/cpu/cpufreq/pegasusqplus/cpu_online_bias_down_threshold
echo "65" > /sys/devices/system/cpu/cpufreq/pegasusqplus/cpu_online_bias_up_threshold
echo "16" > /sys/devices/system/cpu/cpufreq/pegasusqplus/cpu_up_rate
echo "5" > /sys/devices/system/cpu/cpufreq/pegasusqplus/down_differential
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/dvfs_debug
echo "1200000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/freq_for_fast_down
echo "400000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/freq_for_responsiveness
echo "37" > /sys/devices/system/cpu/cpufreq/pegasusqplus/freq_step
echo "13" > /sys/devices/system/cpu/cpufreq/pegasusqplus/freq_step_dec
echo "500000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_1_1
echo "200000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_2_0
echo "500000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_2_1
echo "200000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_3_0
echo "700000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_3_1
echo "400000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_4_0
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_lock
echo "175" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_1_1
echo "175" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_2_0
echo "275" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_2_1
echo "275" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_3_0
echo "375" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_3_1
echo "375" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_4_0
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/ignore_nice_load
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/lcdfreq_enable
echo "20" > /sys/devices/system/cpu/cpufreq/pegasusqplus/lcdfreq_kick_in_down_delay
echo "500000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/lcdfreq_kick_in_freq
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/max_cpu_lock
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/min_cpu_lock
echo "1" > /sys/devices/system/cpu/cpufreq/pegasusqplus/sampling_down_factor
echo "30000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/sampling_rate
echo "10000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/sampling_rate_min
echo "1" > /sys/devices/system/cpu/cpufreq/pegasusqplus/up_nr_cpus
echo "82" > /sys/devices/system/cpu/cpufreq/pegasusqplus/up_threshold
echo "95" > /sys/devices/system/cpu/cpufreq/pegasusqplus/up_threshold_at_fast_down
echo "40" > /sys/devices/system/cpu/cpufreq/pegasusqplus/up_threshold_at_min_freq
echo "6" > /sys/devices/system/cpu/cpufreq/pegasusqplus/up_threshold_diff
fi
if [ "pegasusqplus - balanced" == "$2" ]; then
echo "30" > /sys/devices/system/cpu/cpufreq/pegasusqplus/cpu_down_rate
echo "2" > /sys/devices/system/cpu/cpufreq/pegasusqplus/cpu_online_bias_count
echo "52" > /sys/devices/system/cpu/cpufreq/pegasusqplus/cpu_online_bias_down_threshold
echo "70" > /sys/devices/system/cpu/cpufreq/pegasusqplus/cpu_online_bias_up_threshold
echo "16" > /sys/devices/system/cpu/cpufreq/pegasusqplus/cpu_up_rate
echo "5" > /sys/devices/system/cpu/cpufreq/pegasusqplus/down_differential
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/dvfs_debug
echo "1200000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/freq_for_fast_down
echo "400000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/freq_for_responsiveness
echo "4" > /sys/devices/system/cpu/cpufreq/pegasusqplus/freq_step
echo "2" > /sys/devices/system/cpu/cpufreq/pegasusqplus/freq_step_dec
echo "700000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_1_1
echo "200000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_2_0
echo "700000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_2_1
echo "300000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_3_0
echo "900000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_3_1
echo "400000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_4_0
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_lock
echo "275" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_1_1
echo "275" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_2_0
echo "375" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_2_1
echo "375" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_3_0
echo "450" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_3_1
echo "450" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_4_0
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/ignore_nice_load
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/lcdfreq_enable
echo "20" > /sys/devices/system/cpu/cpufreq/pegasusqplus/lcdfreq_kick_in_down_delay
echo "500000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/lcdfreq_kick_in_freq
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/max_cpu_lock
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/min_cpu_lock
echo "1" > /sys/devices/system/cpu/cpufreq/pegasusqplus/sampling_down_factor
echo "32000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/sampling_rate
echo "10000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/sampling_rate_min
echo "1" > /sys/devices/system/cpu/cpufreq/pegasusqplus/up_nr_cpus~1
echo "90" > /sys/devices/system/cpu/cpufreq/pegasusqplus/up_threshold
echo "95" > /sys/devices/system/cpu/cpufreq/pegasusqplus/up_threshold_at_fast_down
echo "55" > /sys/devices/system/cpu/cpufreq/pegasusqplus/up_threshold_at_min_freq
echo "7" > /sys/devices/system/cpu/cpufreq/pegasusqplus/up_threshold_diff
fi
if [ "pegasusqplus - battery" == "$2" ]; then
echo "30" > /sys/devices/system/cpu/cpufreq/pegasusqplus/cpu_down_rate
echo "2" > /sys/devices/system/cpu/cpufreq/pegasusqplus/cpu_online_bias_count
echo "52" > /sys/devices/system/cpu/cpufreq/pegasusqplus/cpu_online_bias_down_threshold
echo "70" > /sys/devices/system/cpu/cpufreq/pegasusqplus/cpu_online_bias_up_threshold
echo "16" > /sys/devices/system/cpu/cpufreq/pegasusqplus/cpu_up_rate
echo "5" > /sys/devices/system/cpu/cpufreq/pegasusqplus/down_differential
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/dvfs_debug
echo "1200000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/freq_for_fast_down
echo "400000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/freq_for_responsiveness
echo "4" > /sys/devices/system/cpu/cpufreq/pegasusqplus/freq_step
echo "2" > /sys/devices/system/cpu/cpufreq/pegasusqplus/freq_step_dec
echo "900000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_1_1
echo "700000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_2_0
echo "1000000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_2_1
echo "800000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_3_0
echo "1400000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_3_1
echo "1100000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_freq_4_0
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_lock
echo "300" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_1_1
echo "300" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_2_0
echo "375" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_2_1
echo "375" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_3_0
echo "450" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_3_1
echo "450" > /sys/devices/system/cpu/cpufreq/pegasusqplus/hotplug_rq_4_0
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/ignore_nice_load
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/lcdfreq_enable
echo "20" > /sys/devices/system/cpu/cpufreq/pegasusqplus/lcdfreq_kick_in_down_delay
echo "500000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/lcdfreq_kick_in_freq
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/max_cpu_lock
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusqplus/min_cpu_lock
echo "1" > /sys/devices/system/cpu/cpufreq/pegasusqplus/sampling_down_factor
echo "32000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/sampling_rate
echo "10000" > /sys/devices/system/cpu/cpufreq/pegasusqplus/sampling_rate_min
echo "1" > /sys/devices/system/cpu/cpufreq/pegasusqplus/up_nr_cpus~1
echo "90" > /sys/devices/system/cpu/cpufreq/pegasusqplus/up_threshold
echo "95" > /sys/devices/system/cpu/cpufreq/pegasusqplus/up_threshold_at_fast_down
echo "55" > /sys/devices/system/cpu/cpufreq/pegasusqplus/up_threshold_at_min_freq
echo "7" > /sys/devices/system/cpu/cpufreq/pegasusqplus/up_threshold_diff
fi
if [ "zzmoove - standard" == "$2" ]; then
# sampling rate and sampling down
echo "100000" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_rate
echo "4" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_rate_sleep_multiplier
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_down_factor
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_down_max_momentum
echo "50" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_down_momentum_sensitivity
# freq scaling and hotplugging
echo "70" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold
echo "100" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_sleep
echo "75" >/sys/devices/system/cpu/cpufreq/zzmoove/smooth_up
echo "100" >/sys/devices/system/cpu/cpufreq/zzmoove/smooth_up_sleep
echo "68" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug1
echo "68" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug2
echo "68" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug3
echo "52" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold
echo "60" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_sleep
echo "55" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug1
echo "55" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug2
echo "55" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug3
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/hotplug_sleep
# frequency stepping and limit
echo "5" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_step
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_step_sleep
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_limit
echo "700000" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_limit_sleep
# fast scaling
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/fast_scaling
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/fast_scaling_sleep
# early demand
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/early_demand
echo "35" >/sys/devices/system/cpu/cpufreq/zzmoove/grad_up_threshold
# nice load
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/ignore_nice_load
# LCDFreq scaling
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_enable
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_cores
echo "50" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_up_delay
echo "20" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_down_delay
echo "500000" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_freq
# Hotplug
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq1
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq2
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq3
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq1
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq2
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq3
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/hotplug_block_cycles
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/hotplug_idle_threshold
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug_sleep
fi
if [ "zzmoove - battery" == "$2" ]; then
# sampling rate and sampling down
echo "100000" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_rate
echo "4" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_rate_sleep_multiplier
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_down_factor
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_down_max_momentum
echo "50" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_down_momentum_sensitivity
# freq scaling and hotplugging
echo "95" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold
echo "100" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_sleep
echo "75" >/sys/devices/system/cpu/cpufreq/zzmoove/smooth_up
echo "100" >/sys/devices/system/cpu/cpufreq/zzmoove/smooth_up_sleep
echo "60" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug1
echo "80" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug2
echo "98" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug3
echo "40" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold
echo "60" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_sleep
echo "45" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug1
echo "55" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug2
echo "65" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug3
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/hotplug_sleep
# frequency stepping and limit
echo "10" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_step
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_step_sleep
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_limit
echo "800000" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_limit_sleep
# fast scaling
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/fast_scaling
echo "2" >/sys/devices/system/cpu/cpufreq/zzmoove/fast_scaling_sleep
# early demand
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/early_demand
echo "50" >/sys/devices/system/cpu/cpufreq/zzmoove/grad_up_threshold
# nice load
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/ignore_nice_load
# LCDFreq scaling
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_enable
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_cores
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_up_delay
echo "5" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_down_delay
echo "500000" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_freq
# Hotplug (reset freq thresholds)
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq1
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq2
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq3
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq1
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq2
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq3
# Hotplug
echo "700000" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq1
echo "1000000" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq2
echo "1200000" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq3
echo "600000" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq1
echo "800000" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq2
echo "1000000" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq3
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/hotplug_block_cycles
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/hotplug_idle_threshold
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug_sleep
fi
if [ "zzmoove - optimal" == "$2" ]; then
# sampling rate and sampling down
echo "45000" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_rate
echo "4" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_rate_sleep_multiplier
echo "4" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_down_factor
echo "20" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_down_max_momentum
echo "50" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_down_momentum_sensitivity
# freq scaling and hotplugging
echo "67" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold
echo "100" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_sleep
echo "75" >/sys/devices/system/cpu/cpufreq/zzmoove/smooth_up
echo "100" >/sys/devices/system/cpu/cpufreq/zzmoove/smooth_up_sleep
echo "68" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug1
echo "78" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug2
echo "88" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug3
echo "52" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold
echo "60" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_sleep
echo "45" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug1
echo "55" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug2
echo "65" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug3
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/hotplug_sleep
# frequency stepping and limit
echo "5" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_step
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_step_sleep
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_limit
echo "800000" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_limit_sleep
# fast scaling
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/fast_scaling
echo "2" >/sys/devices/system/cpu/cpufreq/zzmoove/fast_scaling_sleep
# early demand
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/early_demand
echo "35" >/sys/devices/system/cpu/cpufreq/zzmoove/grad_up_threshold
# nice load
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/ignore_nice_load
# LCDFreq scaling
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_enable
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_cores
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_up_delay
echo "5" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_down_delay
echo "500000" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_freq
# Hotplug (reset freq thresholds)
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq1
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq2
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq3
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq1
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq2
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq3
# Hotplug
echo "500000" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq1
echo "700000" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq2
echo "900000" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq3
echo "400000" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq1
echo "600000" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq2
echo "800000" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq3
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/hotplug_block_cycles
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/hotplug_idle_threshold
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug_sleep
fi
if [ "zzmoove - performance" == "$2" ]; then
# sampling rate and sampling down
echo "40000" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_rate
echo "4" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_rate_sleep_multiplier
echo "4" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_down_factor
echo "50" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_down_max_momentum
echo "25" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_down_momentum_sensitivity
# freq scaling and hotplugging
echo "60" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold
echo "100" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_sleep
echo "70" >/sys/devices/system/cpu/cpufreq/zzmoove/smooth_up
echo "100" >/sys/devices/system/cpu/cpufreq/zzmoove/smooth_up_sleep
echo "65" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug1
echo "75" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug2
echo "85" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug3
echo "20" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold
echo "60" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_sleep
echo "25" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug1
echo "35" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug2
echo "45" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug3
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/hotplug_sleep
# frequency stepping and limit
echo "25" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_step
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_step_sleep
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_limit
echo "800000" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_limit_sleep
# fast scaling
echo "2" >/sys/devices/system/cpu/cpufreq/zzmoove/fast_scaling
echo "2" >/sys/devices/system/cpu/cpufreq/zzmoove/fast_scaling_sleep
# early demand
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/early_demand
echo "25" >/sys/devices/system/cpu/cpufreq/zzmoove/grad_up_threshold
# nice load
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/ignore_nice_load
# LCDFreq scaling
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_enable
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_cores
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_up_delay
echo "5" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_down_delay
echo "500000" >/sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_freq
# Hotplug (reset freq thresholds)
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq1
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq2
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq3
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq1
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq2
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq3
# Hotplug
echo "400000" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq1
echo "800000" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq2
echo "1000000" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq3
echo "300000" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq1
echo "700000" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq2
echo "900000" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq3
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/hotplug_block_cycles
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/hotplug_idle_threshold
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug_sleep
fi
if [ "zzmoove - battery extreme yank" == "$2" ]; then
# zzmoove governor settings optimized for battery:
echo "60000" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_rate
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_down_factor
echo "70" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold
echo "50" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/ignore_nice_load
echo "10" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_step
echo "75" >/sys/devices/system/cpu/cpufreq/zzmoove/smooth_up
# hotplug up threshold per core
echo "90" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug1
echo "95" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug2
echo "98" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug3
echo "1000000" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq1
echo "1200000" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq2
echo "1400000" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq3
# hotplug down threshold per core
echo "70" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug1
echo "80" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug2
echo "90" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug3
echo "800000" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq1
echo "1000000" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq2
echo "1200000" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq3
# hotplug block cycles
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/hotplug_block_cycles
# Screen off settings
echo "4" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_rate_sleep_multiplier
echo "85" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_sleep
echo "75" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_sleep
echo "1" > /sys/devices/system/cpu/cpufreq/zzmoove/freq_step_sleep
echo "90" >/sys/devices/system/cpu/cpufreq/zzmoove/smooth_up_sleep
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/hotplug_sleep
fi
if [ "zzmoove - battery yank" == "$2" ]; then
# zzmoove governor settings optimized for battery:
echo "75000" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_rate
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_down_factor
echo "60" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold
echo "40" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/ignore_nice_load
echo "10" >/sys/devices/system/cpu/cpufreq/zzmoove/freq_step
echo "65" >/sys/devices/system/cpu/cpufreq/zzmoove/smooth_up
# hotplug up threshold per core
echo "85" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug1
echo "90" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug2
echo "98" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug3
echo "1000000" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq1
echo "1200000" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq2
echo "1400000" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq3
# hotplug down threshold per core
echo "65" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug1
echo "75" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug2
echo "85" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug3
echo "800000" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq1
echo "1000000" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq2
echo "1200000" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq3
# hotplug block cycles
echo "0" >/sys/devices/system/cpu/cpufreq/zzmoove/hotplug_block_cycles
# Screen off settings
echo "4" >/sys/devices/system/cpu/cpufreq/zzmoove/sampling_rate_sleep_multiplier
echo "85" >/sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_sleep
echo "75" >/sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_sleep
echo "1" > /sys/devices/system/cpu/cpufreq/zzmoove/freq_step_sleep
echo "90" >/sys/devices/system/cpu/cpufreq/zzmoove/smooth_up_sleep
echo "1" >/sys/devices/system/cpu/cpufreq/zzmoove/hotplug_sleep
fi
if [ "lulzactiveq - standard" == "$2" ]; then
echo "20" >/sys/devices/system/cpu/cpufreq/lulzactiveq/cpu_down_rate
echo "10" >/sys/devices/system/cpu/cpufreq/lulzactiveq/cpu_up_rate
echo "0" >/sys/devices/system/cpu/cpufreq/lulzactiveq/debug_mode
echo "50" >/sys/devices/system/cpu/cpufreq/lulzactiveq/dec_cpu_load
echo "40000" >/sys/devices/system/cpu/cpufreq/lulzactiveq/down_sample_time
echo "0" >/sys/devices/system/cpu/cpufreq/lulzactiveq/dvfs_debug
echo "1600000 1500000 1400000 1300000 1200000 1100000 1000000 900000 800000 700000 600000 500000 400000 300000 200000" >/sys/devices/system/cpu/cpufreq/lulzactiveq/freq_table
echo "1400000" >/sys/devices/system/cpu/cpufreq/lulzactiveq/hispeed_freq
echo "500000" >/sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_freq_1_1
echo "200000" >/sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_freq_2_0
echo "500000" >/sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_freq_2_1
echo "400000" >/sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_freq_3_0
echo "800000" >/sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_freq_3_1
echo "500000" >/sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_freq_4_0
echo "0" >/sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_lock
echo "200" >/sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_rq_1_1
echo "200" >/sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_rq_2_0
echo "300" >/sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_rq_2_1
echo "300" >/sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_rq_3_0
echo "400" >/sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_rq_3_1
echo "400" >/sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_rq_4_0
echo "50000" >/sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_sampling_rate
echo "0" >/sys/devices/system/cpu/cpufreq/lulzactiveq/ignore_nice_load
echo "85" >/sys/devices/system/cpu/cpufreq/lulzactiveq/inc_cpu_load
echo "0" >/sys/devices/system/cpu/cpufreq/lulzactiveq/max_cpu_lock
echo "0" >/sys/devices/system/cpu/cpufreq/lulzactiveq/min_cpu_lock
echo "1" >/sys/devices/system/cpu/cpufreq/lulzactiveq/pump_down_step
echo "2" >/sys/devices/system/cpu/cpufreq/lulzactiveq/pump_up_step
echo "11" >/sys/devices/system/cpu/cpufreq/lulzactiveq/screen_off_min_step
echo "1" >/sys/devices/system/cpu/cpufreq/lulzactiveq/up_nr_cpus
echo "20000" >/sys/devices/system/cpu/cpufreq/lulzactiveq/up_sample_time
fi
if [ "ondemand - standard" == "$2" ]; then
echo "3" >/sys/devices/system/cpu/cpufreq/ondemand/down_differential
echo "100" >/sys/devices/system/cpu/cpufreq/ondemand/freq_step
echo "0" >/sys/devices/system/cpu/cpufreq/ondemand/ignore_nice_load
echo "0" >/sys/devices/system/cpu/cpufreq/ondemand/io_is_busy
echo "0" >/sys/devices/system/cpu/cpufreq/ondemand/powersave_bias
echo "1" >/sys/devices/system/cpu/cpufreq/ondemand/sampling_down_factor
echo "100000" >/sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
echo "10000" >/sys/devices/system/cpu/cpufreq/ondemand/sampling_rate_min
echo "95" >/sys/devices/system/cpu/cpufreq/ondemand/up_threshold
fi
exit 0
fi
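# Illustrative read-back check (not part of the original handler): the governor
# profiles above are plain sysfs writes, so a profile can be verified after the
# fact by dumping the tunables again, e.g. for ondemand:
#   for f in /sys/devices/system/cpu/cpufreq/ondemand/*; do echo "$f = $(cat $f)"; done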
if [ "apply_system_tweaks" == "$1" ]; then
if [ "Off" == "$2" ]; then
echo "16384" > /proc/sys/fs/inotify/max_queued_events
echo "77749" > /proc/sys/fs/file-max
echo "128" > /proc/sys/fs/inotify/max_user_instances
echo "8192" > /proc/sys/fs/inotify/max_user_watches
echo "45" > /proc/sys/fs/lease-break-time
echo "8192" > /proc/sys/kernel/msgmax
echo "1250" > /proc/sys/kernel/msgmni
echo "1" > /proc/sys/kernel/panic
echo "64" > /proc/sys/kernel/random/read_wakeup_threshold
echo "128" > /proc/sys/kernel/random/write_wakeup_threshold
echo "6666666" > /proc/sys/kernel/sched_latency_ns
echo "1333332" > /proc/sys/kernel/sched_wakeup_granularity_ns
echo "1500000" > /proc/sys/kernel/sched_min_granularity_ns
echo "250 32000 32 128" > /proc/sys/kernel/sem
echo "33554432" > /proc/sys/kernel/shmmax
echo "12151" > /proc/sys/kernel/threads-max
echo "131071" > /proc/sys/net/core/rmem_max
echo "2097152" > /proc/sys/net/core/wmem_max
echo "524288 1048576 2097152" > /proc/sys/net/ipv4/tcp_rmem
echo "0" > /proc/sys/net/ipv4/tcp_tw_recycle
echo "262144 524288 1048576" > /proc/sys/net.ipv4/tcp_wmem
echo "5" > /proc/sys/vm/dirty_background_ratio
echo "200" > /proc/sys/vm/dirty_expire_centisecs
echo "20" > /proc/sys/vm/dirty_ratio
echo "500" > /proc/sys/vm/dirty_writeback_centisecs
echo "3638" > /proc/sys/vm/min_free_kbytes
echo "60" > /proc/sys/vm/swappiness
echo "100" > /proc/sys/vm/vfs_cache_pressure
echo "0" > /proc/sys/vm/drop_caches
echo "5" > /proc/sys/net/ipv4/tcp_syn_retries
echo "5" > /proc/sys/net/ipv4/tcp_synack_retries
echo "60" > /proc/sys/net/ipv4/tcp_fin_timeout
fi
if [ "Boeffla tweaks" == "$2" ]; then
echo "32000" > /proc/sys/fs/inotify/max_queued_events
echo "524288" > /proc/sys/fs/file-max
echo "256" > /proc/sys/fs/inotify/max_user_instances
echo "10240" > /proc/sys/fs/inotify/max_user_watches
echo "10" > /proc/sys/fs/lease-break-time
echo "65536" > /proc/sys/kernel/msgmax
echo "2048" > /proc/sys/kernel/msgmni
echo "10" > /proc/sys/kernel/panic
echo "128" > /proc/sys/kernel/random/read_wakeup_threshold
echo "256" > /proc/sys/kernel/random/write_wakeup_threshold
echo "18000000" > /proc/sys/kernel/sched_latency_ns
echo "3000000" > /proc/sys/kernel/sched_wakeup_granularity_ns
echo "1500000" > /proc/sys/kernel/sched_min_granularity_ns
echo "500 512000 64 2048" > /proc/sys/kernel/sem
echo "268435456" > /proc/sys/kernel/shmmax
echo "524288" > /proc/sys/kernel/threads-max
echo "524288" > /proc/sys/net/core/rmem_max
echo "524288" > /proc/sys/net/core/wmem_max
echo "6144 87380 524288" > /proc/sys/net/ipv4/tcp_rmem
echo "1" > /proc/sys/net/ipv4/tcp_tw_recycle
echo "6144 87380 524288" > /proc/sys/net.ipv4/tcp_wmem
echo "70" > /proc/sys/vm/dirty_background_ratio
echo "250" > /proc/sys/vm/dirty_expire_centisecs
echo "90" > /proc/sys/vm/dirty_ratio
echo "500" > /proc/sys/vm/dirty_writeback_centisecs
echo "4096" > /proc/sys/vm/min_free_kbytes
echo "60" > /proc/sys/vm/swappiness
echo "10" > /proc/sys/vm/vfs_cache_pressure
echo "3" > /proc/sys/vm/drop_caches
echo "5" > /proc/sys/net/ipv4/tcp_syn_retries
echo "5" > /proc/sys/net/ipv4/tcp_synack_retries
echo "60" > /proc/sys/net/ipv4/tcp_fin_timeout
fi
if [ "Speedmod tweaks" == "$2" ]; then
echo "16384" > /proc/sys/fs/inotify/max_queued_events
echo "77749" > /proc/sys/fs/file-max
echo "128" > /proc/sys/fs/inotify/max_user_instances
echo "8192" > /proc/sys/fs/inotify/max_user_watches
echo "45" > /proc/sys/fs/lease-break-time
echo "8192" > /proc/sys/kernel/msgmax
echo "1250" > /proc/sys/kernel/msgmni
echo "1" > /proc/sys/kernel/panic
echo "64" > /proc/sys/kernel/random/read_wakeup_threshold
echo "128" > /proc/sys/kernel/random/write_wakeup_threshold
echo "6666666" > /proc/sys/kernel/sched_latency_ns
echo "1333332" > /proc/sys/kernel/sched_wakeup_granularity_ns
echo "1500000" > /proc/sys/kernel/sched_min_granularity_ns
echo "250 32000 32 128" > /proc/sys/kernel/sem
echo "33554432" > /proc/sys/kernel/shmmax
echo "12151" > /proc/sys/kernel/threads-max
echo "131071" > /proc/sys/net/core/rmem_max
echo "2097152" > /proc/sys/net/core/wmem_max
echo "524288 1048576 2097152" > /proc/sys/net/ipv4/tcp_rmem
echo "0" > /proc/sys/net/ipv4/tcp_tw_recycle
echo "262144 524288 1048576" > /proc/sys/net.ipv4/tcp_wmem
echo "5" > /proc/sys/vm/dirty_background_ratio
echo "200" > /proc/sys/vm/dirty_expire_centisecs
echo "20" > /proc/sys/vm/dirty_ratio
echo "1500" > /proc/sys/vm/dirty_writeback_centisecs
echo "12288" > /proc/sys/vm/min_free_kbytes
echo "0" > /proc/sys/vm/swappiness
echo "100" > /proc/sys/vm/vfs_cache_pressure
echo "0" > /proc/sys/vm/drop_caches
echo "2" > /proc/sys/net/ipv4/tcp_syn_retries
echo "2" > /proc/sys/net/ipv4/tcp_synack_retries
echo "10" > /proc/sys/net/ipv4/tcp_fin_timeout
fi
if [ "Mattiadj tweaks" == "$2" ]; then
echo "10" > /proc/sys/vm/dirty_background_ratio
echo "500" > /proc/sys/vm/dirty_expire_centisecs
echo "10" > /proc/sys/vm/dirty_ratio
echo "100" > /proc/sys/vm/dirty_writeback_centisecs
echo "8192" > /proc/sys/vm/min_free_kbytes
echo "1" > /proc/sys/vm/page-cluster
echo "70" > /proc/sys/vm/swappiness
echo "500" > /proc/sys/vm/vfs_cache_pressure
fi
exit 0
fi
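# Illustrative check (not part of the original handler): the tweak profiles above
# only write procfs values, so they can be confirmed read-only, e.g.:
#   cat /proc/sys/vm/swappiness /proc/sys/vm/dirty_ratio /proc/sys/kernel/sched_latency_ns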
if [ "apply_eq_bands" == "$1" ]; then
echo "1 4027 1031 0 276" > /sys/class/misc/boeffla_sound/eq_bands
echo "2 8076 61555 456 456" > /sys/class/misc/boeffla_sound/eq_bands
echo "3 7256 62323 2644 1368" > /sys/class/misc/boeffla_sound/eq_bands
echo "4 5774 63529 1965 4355" > /sys/class/misc/boeffla_sound/eq_bands
echo "5 1380 1369 0 16384" > /sys/class/misc/boeffla_sound/eq_bands
exit 0
fi
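# Illustrative note (not part of the original handler): each write above sets one
# equalizer band (the first field is the band number 1-5); the currently active
# values can be read back with:
#   cat /sys/class/misc/boeffla_sound/eq_bands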
if [ "apply_ext4_tweaks" == "$1" ]; then
if [ "1" == "$2" ]; then
busybox sync
mount -o remount,commit=20,noatime /dev/block/mmcblk0p8 /cache
busybox sync
mount -o remount,commit=20,noatime /dev/block/mmcblk0p12 /data
busybox sync
fi
if [ "0" == "$2" ]; then
busybox sync
mount -o remount,commit=0,noatime /dev/block/mmcblk0p8 /cache
busybox sync
mount -o remount,commit=0,noatime /dev/block/mmcblk0p12 /data
busybox sync
fi
exit 0
fi
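# Illustrative check (not part of the original handler): whether the remounts above
# took effect shows up in the active mount options, e.g.:
#   mount | busybox grep -e mmcblk0p8 -e mmcblk0p12
# (mmcblk0p8 = /cache and mmcblk0p12 = /data is the partition layout this script assumes)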
if [ "apply_zram" == "$1" ]; then
if [ "1" == "$2" ]; then
if [ "1" == "$3" ]; then
busybox swapoff /dev/block/zram0
busybox swapoff /dev/block/zram1
busybox swapoff /dev/block/zram2
busybox swapoff /dev/block/zram3
echo "1" > /sys/block/zram0/reset
echo "1" > /sys/block/zram1/reset
echo "1" > /sys/block/zram2/reset
echo "1" > /sys/block/zram3/reset
busybox mkswap /dev/block/zram0
busybox swapon -p 2 /dev/block/zram0
fi
if [ "2" == "$3" ]; then
busybox swapoff /dev/block/zram0
busybox swapoff /dev/block/zram1
busybox swapoff /dev/block/zram2
busybox swapoff /dev/block/zram3
echo "1" > /sys/block/zram0/reset
echo "1" > /sys/block/zram1/reset
echo "1" > /sys/block/zram2/reset
echo "1" > /sys/block/zram3/reset
busybox mkswap /dev/block/zram0
busybox mkswap /dev/block/zram1
busybox swapon -p 2 /dev/block/zram0
busybox swapon -p 2 /dev/block/zram1
fi
if [ "4" == "$3" ]; then
busybox swapoff /dev/block/zram0
busybox swapoff /dev/block/zram1
busybox swapoff /dev/block/zram2
busybox swapoff /dev/block/zram3
echo "1" > /sys/block/zram0/reset
echo "1" > /sys/block/zram1/reset
echo "1" > /sys/block/zram2/reset
echo "1" > /sys/block/zram3/reset
busybox mkswap /dev/block/zram0
busybox mkswap /dev/block/zram1
busybox mkswap /dev/block/zram2
busybox mkswap /dev/block/zram3
busybox swapon -p 2 /dev/block/zram0
busybox swapon -p 2 /dev/block/zram1
busybox swapon -p 2 /dev/block/zram2
busybox swapon -p 2 /dev/block/zram3
fi
echo "80" > /proc/sys/vm/swappiness
fi
if [ "0" == "$2" ]; then
busybox swapoff /dev/block/zram0
busybox swapoff /dev/block/zram1
busybox swapoff /dev/block/zram2
busybox swapoff /dev/block/zram3
echo "1" > /sys/block/zram0/reset
echo "1" > /sys/block/zram1/reset
echo "1" > /sys/block/zram2/reset
echo "1" > /sys/block/zram3/reset
fi
exit 0
fi
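# Illustrative check (not part of the original handler): the active zram swap
# devices and their priorities can be listed read-only with:
#   cat /proc/swaps
#   cat /sys/block/zram0/disksize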
if [ "apply_cifs" == "$1" ]; then
if [ "1" == "$2" ]; then
insmod $LIBPATH/cifs.ko
fi
if [ "0" == "$2" ]; then
rmmod $LIBPATH/cifs.ko
fi
exit 0
fi
if [ "apply_nfs" == "$1" ]; then
if [ "1" == "$2" ]; then
insmod $LIBPATH/sunrpc.ko
insmod $LIBPATH/auth_rpcgss.ko
insmod $LIBPATH/lockd.ko
insmod $LIBPATH/nfs.ko
fi
if [ "0" == "$2" ]; then
rmmod $LIBPATH/nfs.ko
rmmod $LIBPATH/lockd.ko
rmmod $LIBPATH/auth_rpcgss.ko
rmmod $LIBPATH/sunrpc.ko
fi
exit 0
fi
if [ "apply_xbox" == "$1" ]; then
if [ "1" == "$2" ]; then
insmod $LIBPATH/xpad.ko
fi
if [ "0" == "$2" ]; then
rmmod $LIBPATH/xpad.ko
fi
exit 0
fi
if [ "apply_exfat" == "$1" ]; then
if [ "1" == "$2" ]; then
insmod $LIBPATH/exfat_core.ko
insmod $LIBPATH/exfat_fs.ko
fi
if [ "0" == "$2" ]; then
rmmod $LIBPATH/exfat_fs.ko
rmmod $LIBPATH/exfat_core.ko
fi
exit 0
fi
if [ "apply_ntfs" == "$1" ]; then
if [ "1" == "$2" ]; then
insmod $LIBPATH/ntfs.ko
fi
if [ "0" == "$2" ]; then
rmmod $LIBPATH/ntfs.ko
fi
exit 0
fi
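# Illustrative check (not part of the original handlers): the insmod/rmmod blocks
# above (cifs, nfs, xbox/xpad, exfat, ntfs) can be verified by listing loaded
# modules, e.g.:
#   lsmod | busybox grep -e cifs -e nfs -e xpad -e exfat -e ntfs
# $LIBPATH is expected to point at the directory holding the kernel's *.ko files.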
if [ "apply_ums" == "$1" ]; then
if [ "1" == "$2" ]; then
umount -l /mnt/extSdCard/
/system/bin/setprop persist.sys.usb.config mass_storage,adb
echo /dev/block/vold/179:49 > /sys/devices/platform/s3c-usbgadget/gadget/lun0/file
fi
if [ "0" == "$2" ]; then
echo "" > /sys/devices/platform/s3c-usbgadget/gadget/lun0/file
/system/bin/vold
/system/bin/setprop persist.sys.usb.config mtp,adb
fi
exit 0
fi
# *******************
# Actions
# *******************
if [ "action_debug_info_file" == "$1" ]; then
echo $(date) Full debug log file start > $2
echo -e "\n============================================\n" >> $2
echo -e "\n**** Boeffla-Kernel version\n" >> $2
cat /proc/version >> $2
echo -e "\n**** Firmware information\n" >> $2
busybox grep ro.build.version /system/build.prop >> $2
echo -e "\n============================================\n" >> $2
echo -e "\n**** Boeffla-Kernel config\n" >> $2
cat /sdcard/boeffla-kernel/boeffla-kernel.conf >> $2
echo -e "\n============================================\n" >> $2
echo -e "\n**** Boeffla-Kernel log\n" >> $2
cat /sdcard/boeffla-kernel-data/boeffla-kernel.log >> $2
echo -e "\n**** Boeffla-Kernel log 1\n" >> $2
cat /sdcard/boeffla-kernel-data/boeffla-kernel.log.1 >> $2
echo -e "\n**** Boeffla-Kernel log 2\n" >> $2
cat /sdcard/boeffla-kernel-data/boeffla-kernel.log.2 >> $2
echo -e "\n**** Boeffla-Kernel log 3\n" >> $2
cat /sdcard/boeffla-kernel-data/boeffla-kernel.log.3 >> $2
echo -e "\n============================================\n" >> $2
echo -e "\n**** Boeffla-Config app log\n" >> $2
cat /sdcard/boeffla-kernel-data/bc.log >> $2
echo -e "\n**** Boeffla-Config app log 1\n" >> $2
cat /sdcard/boeffla-kernel-data/bc.log.1 >> $2
echo -e "\n**** Boeffla-Config app log 2\n" >> $2
cat /sdcard/boeffla-kernel-data/bc.log.2 >> $2
echo -e "\n**** Boeffla-Config app log 3\n" >> $2
cat /sdcard/boeffla-kernel-data/bc.log.3 >> $2
echo -e "\n============================================\n" >> $2
echo -e "\n**** boeffla_sound\n" >> $2
cat /sys/class/misc/boeffla_sound/boeffla_sound >> $2
echo -e "\n**** headphone_volume\n" >> $2
cat /sys/class/misc/boeffla_sound/headphone_volume >> $2
echo -e "\n**** speaker_volume\n" >> $2
cat /sys/class/misc/boeffla_sound/speaker_volume >> $2
echo -e "\n**** speaker_tuning\n" >> $2
cat /sys/class/misc/boeffla_sound/speaker_tuning >> $2
echo -e "\n**** privacy_mode\n" >> $2
cat /sys/class/misc/boeffla_sound/privacy_mode >> $2
echo -e "\n**** equalizer\n" >> $2
cat /sys/class/misc/boeffla_sound/eq >> $2
echo -e "\n**** eq_gains\n" >> $2
cat /sys/class/misc/boeffla_sound/eq_gains >> $2
echo -e "\n**** eq_gains_alt\n" >> $2
cat /sys/class/misc/boeffla_sound/eq_gains_alt >> $2
echo -e "\n**** eq_bands\n" >> $2
cat /sys/class/misc/boeffla_sound/eq_bands >> $2
echo -e "\n**** dac_direct\n" >> $2
cat /sys/class/misc/boeffla_sound/dac_direct >> $2
echo -e "\n**** dac_oversampling\n" >> $2
cat /sys/class/misc/boeffla_sound/dac_oversampling >> $2
echo -e "\n**** fll_tuning\n" >> $2
cat /sys/class/misc/boeffla_sound/fll_tuning >> $2
echo -e "\n**** stereo_expansion\n" >> $2
cat /sys/class/misc/boeffla_sound/stereo_expansion >> $2
echo -e "\n**** mono_downmix\n" >> $2
cat /sys/class/misc/boeffla_sound/mono_downmix >> $2
echo -e "\n**** mic_level_general\n" >> $2
cat /sys/class/misc/boeffla_sound/mic_level_general >> $2
echo -e "\n**** mic_level_call\n" >> $2
cat /sys/class/misc/boeffla_sound/mic_level_call >> $2
echo -e "\n**** debug_level\n" >> $2
cat /sys/class/misc/boeffla_sound/debug_level >> $2
echo -e "\n**** debug_info\n" >> $2
cat /sys/class/misc/boeffla_sound/debug_info >> $2
echo -e "\n**** version\n" >> $2
cat /sys/class/misc/boeffla_sound/version >> $2
echo "\n============================================\n" >> $2
echo -e "\n**** Loaded modules:\n" >> $2
lsmod >> $2
echo -e "\n**** Max CPU frequency:\n" >> $2
cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq >> $2
echo -e "\n**** CPU undervolting:\n" >> $2
cat /sys/devices/system/cpu/cpu0/cpufreq/UV_mV_table >> $2
echo -e "\n**** GPU frequencies:\n" >> $2
cat /sys/class/misc/gpu_clock_control/gpu_control >> $2
echo -e "\n**** GPU undervolting:\n" >> $2
cat /sys/class/misc/gpu_voltage_control/gpu_control >> $2
echo -e "\n**** ASV level:\n" >> $2
cat /sys/devices/system/cpu/cpu0/cpufreq/asv_level >> $2
echo -e "\n**** Root:\n" >> $2
ls /system/xbin/su >> $2
ls /system/app/Superuser.apk >> $2
echo -e "\n**** Busybox:\n" >> $2
ls /sbin/busybox >> $2
ls /system/bin/busybox >> $2
ls /system/xbin/busybox >> $2
echo -e "\n**** Mounts:\n" >> $2
mount | busybox grep /data >> $2
mount | busybox grep /cache >> $2
echo -e "\n**** SD Card read ahead:\n" >> $2
cat /sys/block/mmcblk0/bdi/read_ahead_kb >> $2
cat /sys/block/mmcblk1/bdi/read_ahead_kb >> $2
echo -e "\n**** Various kernel settings by config app:\n" >> $2
echo -e "\n(gov prof, cpu volt prof, gpu freq prof, gpu volt prof, eq prof, mdnie over, sys tweaks, swapp over)\n" >> $2
cat /dev/bk_governor_profile >> $2
cat /dev/bk_cpu_voltages_profile >> $2
cat /dev/bk_gpu_frequencies_profile >> $2
cat /dev/bk_gpu_voltages_profile >> $2
cat /dev/bk_eq_gains_profile >> $2
cat /dev/bk_mdnie_overwrite >> $2
cat /dev/bk_system_tweaks >> $2
cat /dev/bk_swappiness_overwrite >> $2
echo -e "\n**** Touch boost switch:\n" >> $2
cat /sys/class/misc/touchboost_switch/touchboost_switch >> $2
echo -e "\n**** Touch boost frequency:\n" >> $2
cat /sys/class/misc/touchboost_switch/touchboost_freq >> $2
echo -e "\n**** Touch wake:\n" >> $2
cat /sys/class/misc/touchwake/enabled >> $2
cat /sys/class/misc/touchwake/delay >> $2
echo -e "\n**** Early suspend:\n" >> $2
cat /sys/kernel/early_suspend/early_suspend_delay >> $2
echo -e "\n**** Charging levels (ac/usb/wireless):\n" >> $2
cat /sys/kernel/charge_levels/charge_level_ac >> $2
cat /sys/kernel/charge_levels/charge_level_usb >> $2
cat /sys/kernel/charge_levels/charge_level_wireless >> $2
echo -e "\n**** Charging instable power / ignore safety margin:\n" >> $2
cat /sys/kernel/charge_levels/ignore_unstable_power >> $2
cat /sys/kernel/charge_levels/ignore_safety_margin >> $2
echo -e "\n**** Governor:\n" >> $2
cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor >> $2
echo -e "\n**** Scheduler:\n" >> $2
cat /sys/block/mmcblk0/queue/scheduler >> $2
cat /sys/block/mmcblk1/queue/scheduler >> $2
echo -e "\n**** Kernel Logger:\n" >> $2
cat /sys/kernel/printk_mode/printk_mode >> $2
echo -e "\n**** Android Logger:\n" >> $2
cat /sys/kernel/logger_mode/logger_mode >> $2
echo -e "\n**** Sharpness fix:\n" >> $2
cat /sys/class/misc/mdnie_preset/mdnie_preset >> $2
echo -e "\n**** LED fading:\n" >> $2
cat /sys/class/sec/led/led_fade >> $2
echo -e "\n**** LED intensity:\n" >> $2
cat /sys/class/sec/led/led_intensity >> $2
echo -e "\n**** LED speed:\n" >> $2
cat /sys/class/sec/led/led_speed >> $2
echo -e "\n**** LED slope:\n" >> $2
cat /sys/class/sec/led/led_slope >> $2
echo -e "\n**** zRam disk size:\n" >> $2
cat /sys/block/zram0/disksize >> $2
cat /sys/block/zram1/disksize >> $2
cat /sys/block/zram2/disksize >> $2
cat /sys/block/zram3/disksize >> $2
echo -e "\n**** zRam compressed data size:\n" >> $2
cat /sys/block/zram0/compr_data_size >> $2
cat /sys/block/zram1/compr_data_size >> $2
cat /sys/block/zram2/compr_data_size >> $2
cat /sys/block/zram3/compr_data_size >> $2
echo -e "\n**** zRam original data size:\n" >> $2
cat /sys/block/zram0/orig_data_size >> $2
cat /sys/block/zram1/orig_data_size >> $2
cat /sys/block/zram2/orig_data_size >> $2
cat /sys/block/zram3/orig_data_size >> $2
echo -e "\n**** Uptime:\n" >> $2
cat /proc/uptime >> $2
echo -e "\n**** Frequency usage table:\n" >> $2
cat /sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state >> $2
echo -e "\n**** Memory:\n" >> $2
busybox free -m >> $2
echo -e "\n**** Meminfo:\n" >> $2
cat /proc/meminfo >> $2
echo -e "\n**** Swap:\n" >> $2
cat /proc/swaps >> $2
echo -e "\n**** Low memory killer:\n" >> $2
cat /sys/module/lowmemorykiller/parameters/minfree >> $2
echo -e "\n**** Swappiness:\n" >> $2
cat /proc/sys/vm/swappiness >> $2
echo -e "\n**** Storage:\n" >> $2
busybox df >> $2
echo -e "\n**** Mounts:\n" >> $2
mount >> $2
echo -e "\n**** pegasusq tuneables\n" >> $2
cat /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_1_1 >> $2
cat /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_2_0 >> $2
cat /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_1_1 >> $2
cat /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_2_0 >> $2
cat /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_2_1 >> $2
cat /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_3_0 >> $2
cat /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_2_1 >> $2
cat /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_3_0 >> $2
cat /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_3_1 >> $2
cat /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_freq_4_0 >> $2
cat /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_3_1 >> $2
cat /sys/devices/system/cpu/cpufreq/pegasusq/hotplug_rq_4_0 >> $2
cat /sys/devices/system/cpu/cpufreq/pegasusq/cpu_down_rate >> $2
cat /sys/devices/system/cpu/cpufreq/pegasusq/cpu_up_rate >> $2
cat /sys/devices/system/cpu/cpufreq/pegasusq/up_threshold >> $2
cat /sys/devices/system/cpu/cpufreq/pegasusq/freq_step >> $2
echo -e "\n**** zzmoove tuneables\n" >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/sampling_rate >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/sampling_rate_sleep_multiplier >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/sampling_down_factor >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/sampling_down_max_momentum >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/sampling_down_momentum_sensitivity >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/up_threshold >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_sleep >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/smooth_up >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/smooth_up_sleep >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug1 >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug2 >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug3 >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/down_threshold >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_sleep >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug1 >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug2 >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug3 >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq1 >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq2 >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/up_threshold_hotplug_freq3 >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq1 >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq2 >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/down_threshold_hotplug_freq3 >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/hotplug_block_cycles >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/hotplug_idle_threshold >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug_sleep >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/hotplug_sleep >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/freq_step >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/freq_step_sleep >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/freq_limit >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/freq_limit_sleep >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/fast_scaling >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/fast_scaling_sleep >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/early_demand >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/grad_up_threshold >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/ignore_nice_load >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_enable >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_cores >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_up_delay >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_down_delay >> $2
cat /sys/devices/system/cpu/cpufreq/zzmoove/lcdfreq_kick_in_freq >> $2
echo -e "\n**** lulzactiveq tuneables\n" >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/cpu_down_rate >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/cpu_up_rate >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/debug_mode >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/dec_cpu_load >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/down_sample_time >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/dvfs_debug >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/freq_table >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/hispeed_freq >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_freq_1_1 >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_freq_2_0 >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_freq_2_1 >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_freq_3_0 >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_freq_3_1 >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_freq_4_0 >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_lock >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_rq_1_1 >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_rq_2_0 >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_rq_2_1 >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_rq_3_0 >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_rq_3_1 >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_rq_4_0 >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/hotplug_sampling_rate >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/ignore_nice_load >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/inc_cpu_load >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/max_cpu_lock >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/min_cpu_lock >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/pump_down_step >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/pump_up_step >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/screen_off_min_step >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/up_nr_cpus >> $2
cat /sys/devices/system/cpu/cpufreq/lulzactiveq/up_sample_time >> $2
echo -e "\n**** ondemand tuneables\n" >> $2
cat /sys/devices/system/cpu/cpufreq/ondemand/down_differential >> $2
cat /sys/devices/system/cpu/cpufreq/ondemand/freq_step >> $2
cat /sys/devices/system/cpu/cpufreq/ondemand/ignore_nice_load >> $2
cat /sys/devices/system/cpu/cpufreq/ondemand/io_is_busy >> $2
cat /sys/devices/system/cpu/cpufreq/ondemand/powersave_bias >> $2
cat /sys/devices/system/cpu/cpufreq/ondemand/sampling_down_factor >> $2
cat /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate >> $2
cat /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate_min >> $2
cat /sys/devices/system/cpu/cpufreq/ondemand/up_threshold >> $2
echo -e "\n============================================\n" >> $2
echo -e "\n**** /data/app folder\n" >> $2
ls -l /data/app >> $2
echo -e "\n**** /system/app folder\n" >> $2
ls -l /system/app >> $2
echo -e "\n============================================\n" >> $2
echo -e "\n**** /system/etc/init.d folder\n" >> $2
ls -l /system/etc/init.d >> $2
echo -e "\n**** /etc/init.d folder\n" >> $2
ls -l /etc/init.d >> $2
echo -e "\n**** /data/init.d folder\n" >> $2
ls -l /data/init.d >> $2
echo -e "\n============================================\n" >> $2
echo -e "\n**** last_kmsg\n" >> $2
cat /proc/last_kmsg >> $2
echo -e "\n============================================\n" >> $2
echo -e "\n**** dmesg\n" >> $2
dmesg >> $2
echo -e "\n============================================\n" >> $2
echo $(date) Full debug log file end >> $2
exit 0
fi
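# Illustrative invocation (not part of the original script; argument layout taken
# from the handler above, where $2 is the output file): a call would look roughly like
#   sh /res/bc/bccontroller.sh action_debug_info_file /sdcard/boeffla-kernel-data/debug_info.txt
# (the output path here is only an example)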
if [ "action_reboot" == "$1" ]; then
busybox sync
busybox sleep 1s
/system/bin/reboot
exit 0
fi
if [ "action_reboot_cwm" == "$1" ]; then
busybox sync
busybox sleep 1s
/system/bin/reboot recovery
exit 0
fi
if [ "action_reboot_download" == "$1" ]; then
busybox sync
busybox sleep 1s
/system/bin/reboot download
exit 0
fi
if [ "action_wipe_caches_reboot" == "$1" ]; then
busybox rm -rf /cache/*
busybox rm -rf /data/dalvik-cache/*
busybox sync
busybox sleep 1s
/system/bin/reboot
exit 0
fi
if [ "action_wipe_clipboard_cache" == "$1" ]; then
busybox rm -rf /data/clipboard/*
busybox sync
exit 0
fi
if [ "action_clean_initd" == "$1" ]; then
busybox tar cvz -f $2 /system/etc/init.d
mount -o remount,rw -t ext4 /dev/block/mmcblk0p9 /system
busybox rm /system/etc/init.d/*
mount -o remount,ro -t ext4 /dev/block/mmcblk0p9 /system
exit 0
fi
if [ "action_fix_permissions" == "$1" ]; then
busybox sh /res/bc/fix_permissions
busybox sync
exit 0
fi
if [ "action_fstrim" == "$1" ]; then
echo -e "Trim /data"
/res/bc/fstrim -v /data
echo -e ""
echo -e "Trim /cache"
/res/bc/fstrim -v /cache
echo -e ""
echo -e "Trim /system"
/res/bc/fstrim -v /system
echo -e ""
busybox sync
exit 0
fi
if [ "flash_kernel" == "$1" ]; then
busybox dd if=$2 of=/dev/block/mmcblk0p5
exit 0
fi
if [ "archive_kernel" == "$1" ]; then
IMGPATH=$2
cd ${IMGPATH%/*}
busybox rm $3.tar
busybox rm $3.tar.md5
busybox tar cvf $3.tar ${IMGPATH##*/}
busybox md5sum $3.tar >> $3.tar
busybox mv $3.tar $3.tar.md5
busybox chmod 666 $3.tar.md5
busybox rm $2
exit 0
fi
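# Illustrative note (not part of the original handler): archive_kernel produces a
# .tar.md5 in the usual Samsung/Odin style, i.e. a tar archive with its own md5sum
# line appended; that trailing checksum line can be inspected with, e.g.:
#   busybox tail -c 100 "$3.tar.md5"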
if [ "extract_kernel" == "$1" ]; then
busybox tar -xvf $2 -C $3
exit 0
fi
if [ "flash_recovery" == "$1" ]; then
busybox dd if=$2 of=/dev/block/mmcblk0p6
exit 0
fi
if [ "extract_recovery" == "$1" ]; then
busybox tar -xvf $2 -C $3
exit 0
fi
if [ "flash_modem" == "$1" ]; then
busybox dd if=$2 of=/dev/block/mmcblk0p7
exit 0
fi
if [ "extract_modem" == "$1" ]; then
busybox tar -xvf $2 -C $3
exit 0
fi
if [ "flash_cm_kernel" == "$1" ]; then
busybox dd if=$2/boot.img of=/dev/block/mmcblk0p5
mount -o remount,rw -t ext4 /dev/block/mmcblk0p9 /system
busybox rm -f /system/lib/modules/*
busybox cp $2/system/lib/modules/* /system/lib/modules
busybox chmod 644 /system/lib/modules/*
mount -o remount,ro -t ext4 /dev/block/mmcblk0p9 /system
exit 0
fi
if [ "extract_cm_kernel" == "$1" ]; then
busybox unzip $2 -d $3
exit 0
fi
|
Zzomborg/Laearning
|
ramdisk_boeffla/res/bc/bccontroller.sh
|
Shell
|
gpl-2.0
| 75940 |
#!/bin/bash
# ===========================================================================
# eXe
# Copyright 2012-2013, Pedro Peña Pérez, Open Phoenix IT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Help
#-----
# Pybabel doc http://babel.pocoo.org/docs/cmdline/
# Pybabel source https://github.com/mitsuhiko/babel
# Transecma (py2json.py) https://github.com/nandoflorestan/bag/tree/master/bag/web
#
#
# Changes
# -------
# 2013-10:
# * Usage of Babel1.3 (Pedro Peña)
# Uses python Babel 1.3 patched to include 'Language' header
# (https://dl.dropboxusercontent.com/s/k1i7ph2m2g4s7kx/Babel-1.3.tar.gz)
# as discussed here:
# https://forja.cenatic.es/tracker/index.php?func=detail&aid=1905&group_id=197&atid=883
#
# * Changed --version from '1.04.1' to '2.0' (JRF)
#
# 2014-03-17:
# * Changed options of pybabel update (JRF)
# trying to fix the duplicate msgstr problem
# comment the -N option out
# Option -N, --no-fuzzy-matching: "do not use fuzzy matching" (default False)
# add --ignore-obsolete
# * Pybabel compile (JRF)
# documented that we've never used option -f ("also include fuzzy translations") ¿?
#
# 2015-02-26:
# * Version 2.0.2 (JRF)
#
# 2015-03-10:
# * Preparing version 2.1 (JRF)
#
#===========================================================================
export PYTHONPATH=.
project="eXeLearning"
version="2.1"
# 1.- pyBabel - Extraction of strings from *.py and *.js into new POT
echo -e " *** Extracting messages from python exe files, jsui javascript and html template files ***\n"
pybabel extract --keyword=x_ --keyword=c_ --project "$project" --version "$version" -F pybabel.conf --sort-by-file . > exe/locale/messages.pot
# tools/nevow-xmlgettext exe/jsui/templates/mainpage.html exe/webui/templates/about.html | msgcat exe/locale/messages.pot.tmp - -o exe/locale/messages.pot
# rm exe/locale/messages.pot.tmp
# Removal of fuzzy comments from the POT file
sed -i "s/^#, fuzzy\$//" exe/locale/messages.pot
# 2.- pyBabel - Updating the PO files of the different languages
echo -e "\n\n\n *** Updating *.po files ***\n"
pybabel update -D exe -i exe/locale/messages.pot -d exe/locale/ --ignore-obsolete
# Set correct Project-Id-Version
find exe -name exe.po | xargs sed -i 's/Project-Id-Version:.*/Project-Id-Version: '"$project $version"'\\n"/'
# 3.- pyBabel - Compiling the MO files
echo -e "\n\n\n *** Compiling *.mo files ***\n"
pybabel compile -D exe -d exe/locale/ --statistics
# pybabel bugs fixing
find exe -name exe.po | xargs sed -i 'N;N;/#~ msgid ""\n#~ msgstr ""/d' # Clean wrong commented msgids
find exe -name exe.po | xargs sed -i '1!N;1!N;/#~ msgid ""\n#~ msgstr ""/d' # Clean wrong commented msgids
# 4.- Transecma - Generating the translated JS files for the different languages
echo -e "\n\n\n *** Compiling javascript for jsui files ***\n"
python tools/po2json.py --domain exe --directory exe/locale --output-dir exe/jsui/scripts/i18n
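# Illustrative example (not part of the original script): a single locale can be
# updated and compiled on its own via pybabel's -l/--locale option, e.g. for "es":
#   pybabel update -D exe -i exe/locale/messages.pot -d exe/locale/ -l es --ignore-obsolete
#   pybabel compile -D exe -d exe/locale/ -l es --statistics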
|
UstadMobile/exelearning-extjs5-mirror
|
tools/mki18n.sh
|
Shell
|
gpl-2.0
| 3645 |
#!/bin/sh
usage() {
cat <<"_EOF_"
Usage: sh test/pkgcheck.sh [--zlib-compat]
Verifies that the various build systems produce identical results on a Unixlike system.
If --zlib-compat, tests with zlib compatible builds.
To build the 32 bit version for the current 64 bit arch:
$ sudo apt install ninja-build diffoscope gcc-multilib
$ export CMAKE_ARGS="-DCMAKE_C_FLAGS=-m32" CFLAGS=-m32 LDFLAGS=-m32
$ sh test/pkgcheck.sh
To cross-build, install the appropriate qemu and gcc packages,
and set the environment variables used by configure or cmake.
On Ubuntu, for example (values taken from .github/workflows/pkgconf.yml):
arm HF:
$ sudo apt install ninja-build diffoscope qemu gcc-arm-linux-gnueabihf libc6-dev-armhf-cross
$ export CHOST=arm-linux-gnueabihf
$ export CMAKE_ARGS="-DCMAKE_TOOLCHAIN_FILE=cmake/toolchain-arm.cmake -DCMAKE_C_COMPILER_TARGET=${CHOST}"
aarch64:
$ sudo apt install ninja-build diffoscope qemu gcc-aarch64-linux-gnu libc6-dev-arm64-cross
$ export CHOST=aarch64-linux-gnu
$ export CMAKE_ARGS="-DCMAKE_TOOLCHAIN_FILE=cmake/toolchain-aarch64.cmake -DCMAKE_C_COMPILER_TARGET=${CHOST}"
ppc (32 bit big endian):
$ sudo apt install ninja-build diffoscope qemu gcc-powerpc-linux-gnu libc6-dev-powerpc-cross
$ export CHOST=powerpc-linux-gnu
$ export CMAKE_ARGS="-DCMAKE_TOOLCHAIN_FILE=cmake/toolchain-powerpc.cmake"
ppc64le:
$ sudo apt install ninja-build diffoscope qemu gcc-powerpc64le-linux-gnu libc6-dev-ppc64el-cross
$ export CHOST=powerpc64le-linux-gnu
$ export CMAKE_ARGS="-DCMAKE_TOOLCHAIN_FILE=cmake/toolchain-powerpc64le.cmake"
then:
$ export CC=${CHOST}-gcc
$ sh test/pkgcheck.sh [--zlib-compat]
Note: on Mac, you may also need to do 'sudo xcode-select -r' to get cmake to match configure/make's behavior (i.e. omit -isysroot).
_EOF_
}
set -ex
# Caller can also set CMAKE_ARGS or CONFIGURE_ARGS if desired
CMAKE_ARGS=${CMAKE_ARGS}
CONFIGURE_ARGS=${CONFIGURE_ARGS}
case "$1" in
--zlib-compat)
suffix=""
CMAKE_ARGS="$CMAKE_ARGS -DZLIB_COMPAT=ON"
CONFIGURE_ARGS="$CONFIGURE_ARGS --zlib-compat"
;;
"")
suffix="-ng"
;;
*)
echo "Unknown arg '$1'"
usage
exit 1
;;
esac
if ! test -f "configure"
then
echo "Please run from top of source tree"
exit 1
fi
# Tell GNU's ld etc. to use Jan 1 1970 when embedding timestamps
# Probably only needed on older systems (ubuntu 14.04, BSD?)
export SOURCE_DATE_EPOCH=0
case $(uname) in
Darwin)
# Tell Apple's ar etc. to use zero timestamps
export ZERO_AR_DATE=1
# What CPU are we running on, exactly?
sysctl -n machdep.cpu.brand_string
sysctl -n machdep.cpu.features
sysctl -n machdep.cpu.leaf7_features
sysctl -n machdep.cpu.extfeatures
;;
esac
# Use same compiler for make and cmake builds
if test "$CC"x = ""x
then
if clang --version
then
export CC=clang
elif gcc --version
then
export CC=gcc
fi
fi
# New build system
# Happens to delete top-level zconf.h
# (which itself is a bug, https://github.com/madler/zlib/issues/162 )
# which triggers another bug later in configure,
# https://github.com/madler/zlib/issues/499
rm -rf btmp2 pkgtmp2
mkdir btmp2 pkgtmp2
export DESTDIR=$(pwd)/pkgtmp2
cd btmp2
cmake -G Ninja ${CMAKE_ARGS} ..
ninja -v
ninja install
cd ..
# Original build system
rm -rf btmp1 pkgtmp1
mkdir btmp1 pkgtmp1
export DESTDIR=$(pwd)/pkgtmp1
cd btmp1
case $(uname) in
Darwin)
export LDFLAGS="-Wl,-headerpad_max_install_names"
;;
esac
../configure $CONFIGURE_ARGS
make
make install
cd ..
repack_ar() {
if ! cmp --silent pkgtmp1/usr/local/lib/libz$suffix.a pkgtmp2/usr/local/lib/libz$suffix.a
then
echo "libz$suffix.a does not match. Probably filenames differ (.o vs .c.o). Unpacking and renaming..."
# Note: %% is posix shell syntax meaning "Remove Largest Suffix Pattern", see
# https://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_06_02
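# Illustrative example (object name is hypothetical): with a="adler32.c.o",
# ${a%%.c.o} expands to "adler32", so the rename below yields "adler32.o".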
cd pkgtmp1; ar x usr/local/lib/libz$suffix.a; rm usr/local/lib/libz$suffix.a; cd ..
cd pkgtmp2; ar x usr/local/lib/libz$suffix.a; rm usr/local/lib/libz$suffix.a; for a in *.c.o; do mv $a ${a%%.c.o}.o; done; cd ..
# Also, remove __.SYMDEF SORTED if present, as it has those funky .c.o names embedded in it.
rm -f pkgtmp[12]/__.SYMDEF\ SORTED
fi
}
case $(uname) in
Darwin)
# Remove the build uuid.
dylib1=$(find pkgtmp1 -type f -name '*.dylib*')
dylib2=$(find pkgtmp2 -type f -name '*.dylib*')
strip -x -no_uuid "$dylib1"
strip -x -no_uuid "$dylib2"
;;
esac
# The ar on newer systems defaults to -D (i.e. deterministic),
# but FreeBSD 12.1, Debian 8, and Ubuntu 14.04 seem to not do that.
# I had trouble passing -D safely to the ar inside CMakeLists.txt,
# so punt and unpack the archive if needed before comparing.
# Also, cmake uses different .o suffix anyway...
repack_ar
if diff -Nur pkgtmp1 pkgtmp2
then
echo pkgcheck-cmake-bits-identical PASS
else
echo pkgcheck-cmake-bits-identical FAIL
dylib1=$(find pkgtmp1 -type f -name '*.dylib*' -print -o -type f -name '*.so.*' -print)
dylib2=$(find pkgtmp2 -type f -name '*.dylib*' -print -o -type f -name '*.so.*' -print)
diffoscope $dylib1 $dylib2 | cat
exit 1
fi
rm -rf btmp1 btmp2 pkgtmp1 pkgtmp2
# any failure would have caused an early exit already
echo "pkgcheck: PASS"
|
af4t/Server
|
libs/zlibng/test/pkgcheck.sh
|
Shell
|
gpl-3.0
| 5,229 |
#!/bin/sh -e
#
# Copyright (c) 2012 Simone Basso <[email protected]>,
# NEXA Center for Internet & Society at Politecnico di Torino
#
# This file is part of Neubot <http://www.neubot.org/>.
#
# Neubot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Neubot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Neubot. If not, see <http://www.gnu.org/licenses/>.
#
#
# Make sure the migration table contains all the functions
# defined in neubot/database/migrate.py
#
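# (Each migration step is a function such as "def migrate_from__4_1(...)" -- the name
# is shown only as an illustration -- and must also be listed in the migration table.)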
NUM_DEF=$(grep '^def migrate_from__' neubot/database/migrate.py|wc -l)
NUM_TBL=$(grep '^ migrate_from__' neubot/database/migrate.py|wc -l)
if [ "$NUM_DEF" != "$NUM_TBL" ]; then
echo "<ERR> Not all migrate functions in migrate table" 1>&2
echo "<ERR> Migrate funcs: $NUM_DEF, migrate table: $NUM_TBL" 1>&2
exit 1
fi
|
neubot/neubot-server
|
regress/neubot/database/migrate.sh
|
Shell
|
gpl-3.0
| 1,243 |
#!/bin/bash
# Copyright 2014 Yajie Miao Carnegie Mellon University Apache 2.0
# This script trains tandem systems using bottleneck features (BNFs). The
# BNF network is trained over fMLLR features. It is to be run after run.sh.
# Before running this, you should have already built the initial GMM model. This
# script requires a GPU, and also the "pdnn" toolkit to train the BNF net.
# For more information regarding the recipes and results, visit the website
# http://www.cs.cmu.edu/~ymiao/kaldipdnn
working_dir=exp_pdnn/bnf_tandem
gmmdir=exp/tri3
# Specify the gpu device to be used
gpu=gpu
cmd=run.pl
. cmd.sh
[ -f path.sh ] && . ./path.sh
. parse_options.sh || exit 1;
# At this point you may want to make sure the directory $working_dir is
# somewhere with a lot of space, preferably on the local GPU-containing machine.
if [ ! -d pdnn ]; then
echo "Checking out PDNN code."
svn co https://github.com/yajiemiao/pdnn/trunk pdnn
fi
if [ ! -d steps_pdnn ]; then
echo "Checking out steps_pdnn scripts."
svn co https://github.com/yajiemiao/kaldipdnn/trunk/steps_pdnn steps_pdnn
fi
if ! nvidia-smi; then
echo "The command nvidia-smi was not found: this probably means you don't have a GPU."
echo "(Note: this script might still work, it would just be slower.)"
fi
# The hope here is that Theano has been installed either to python or to python2.6
pythonCMD=python
if ! python -c 'import theano;'; then
if ! python2.6 -c 'import theano;'; then
echo "Theano does not seem to be installed on your machine. Not continuing."
echo "(Note: this script might still work, it would just be slower.)"
exit 1;
else
pythonCMD=python2.6
fi
fi
mkdir -p $working_dir/log
! gmm-info $gmmdir/final.mdl >&/dev/null && \
echo "Error getting GMM info from $gmmdir/final.mdl" && exit 1;
num_pdfs=`gmm-info $gmmdir/final.mdl | grep pdfs | awk '{print $NF}'` || exit 1;
echo =====================================================================
echo " Data Split & Alignment & Feature Preparation "
echo =====================================================================
# Split training data into training and cross-validation sets for DNN
if [ ! -d data/train_tr95 ]; then
utils/subset_data_dir_tr_cv.sh --cv-spk-percent 5 data/train data/train_tr95 data/train_cv05 || exit 1
fi
# Alignment on the training and validation data.
for set in tr95 cv05; do
if [ ! -d ${gmmdir}_ali_$set ]; then
steps/align_fmllr.sh --nj 24 --cmd "$train_cmd" \
data/train_$set data/lang $gmmdir ${gmmdir}_ali_$set || exit 1
fi
done
# Dump fMLLR features. "Fake" cmvn stats (zero means and unit variances) are applied.
for set in tr95 cv05; do
if [ ! -d $working_dir/data/train_$set ]; then
steps/nnet/make_fmllr_feats.sh --nj 24 --cmd "$train_cmd" \
--transform-dir ${gmmdir}_ali_$set \
$working_dir/data/train_$set data/train_$set $gmmdir $working_dir/_log $working_dir/_fmllr || exit 1
steps/compute_cmvn_stats.sh --fake \
$working_dir/data/train_$set $working_dir/_log $working_dir/_fmllr || exit 1;
fi
done
for set in dev test; do
if [ ! -d $working_dir/data/$set ]; then
steps/nnet/make_fmllr_feats.sh --nj 8 --cmd "$train_cmd" \
--transform-dir $gmmdir/decode_$set \
$working_dir/data/$set data/$set $gmmdir $working_dir/_log $working_dir/_fmllr || exit 1
steps/compute_cmvn_stats.sh --fake \
$working_dir/data/$set $working_dir/_log $working_dir/_fmllr || exit 1;
fi
done
echo =====================================================================
echo " Training and Cross-Validation Pfiles "
echo =====================================================================
# By default, DNN inputs include 11 frames of fMLLR
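# (5 frames of left context + the current frame + 5 frames of right context,
# matching the --splice-opts passed below)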
for set in tr95 cv05; do
if [ ! -f $working_dir/${set}.pfile.done ]; then
steps_pdnn/build_nnet_pfile.sh --cmd "$train_cmd" --do-concat false \
--norm-vars false --splice-opts "--left-context=5 --right-context=5" \
$working_dir/data/train_$set ${gmmdir}_ali_$set $working_dir || exit 1
touch $working_dir/${set}.pfile.done
fi
done
echo =====================================================================
echo " DNN Pre-training & Fine-tuning "
echo =====================================================================
feat_dim=$(gunzip -c $working_dir/train_tr95.pfile.1.gz |head |grep num_features| awk '{print $2}') || exit 1;
if [ ! -f $working_dir/dnn.ptr.done ]; then
echo "SDA Pre-training"
$cmd $working_dir/log/dnn.ptr.log \
export PYTHONPATH=$PYTHONPATH:`pwd`/pdnn/ \; \
export THEANO_FLAGS=mode=FAST_RUN,device=$gpu,floatX=float32 \; \
$pythonCMD pdnn/cmds/run_SdA.py --train-data "$working_dir/train_tr95.pfile.*.gz,partition=2000m,random=true,stream=false" \
--nnet-spec "$feat_dim:1024:1024:1024:1024:42:1024:$num_pdfs" \
--1stlayer-reconstruct-activation "tanh" \
--wdir $working_dir --param-output-file $working_dir/dnn.ptr \
--ptr-layer-number 4 --epoch-number 5 || exit 1;
touch $working_dir/dnn.ptr.done
fi
if [ ! -f $working_dir/dnn.fine.done ]; then
echo "Fine-tuning DNN"
$cmd $working_dir/log/dnn.fine.log \
export PYTHONPATH=$PYTHONPATH:`pwd`/pdnn/ \; \
export THEANO_FLAGS=mode=FAST_RUN,device=$gpu,floatX=float32 \; \
$pythonCMD pdnn/cmds/run_DNN.py --train-data "$working_dir/train_tr95.pfile.*.gz,partition=2000m,random=true,stream=false" \
--valid-data "$working_dir/train_cv05.pfile.*.gz,partition=600m,random=true,stream=false" \
--nnet-spec "$feat_dim:1024:1024:1024:1024:42:1024:$num_pdfs" \
--ptr-file $working_dir/dnn.ptr --ptr-layer-number 4 \
--lrate "D:0.08:0.5:0.2,0.2:8" \
--wdir $working_dir --kaldi-output-file $working_dir/dnn.nnet || exit 1;
touch $working_dir/dnn.fine.done
fi
( cd $working_dir; ln -s dnn.nnet bnf.nnet )
echo =====================================================================
echo " BNF Feature Generation "
echo =====================================================================
# Combine fMLLRs of train_* sets into train
if [ ! -d $working_dir/data/train ]; then
utils/combine_data.sh $working_dir/data/train $working_dir/data/train_tr95 $working_dir/data/train_cv05
fi
# Dump BNF features
for set in train; do
if [ ! -d $working_dir/data_bnf/${set} ]; then
steps_pdnn/make_bnf_feat.sh --nj 24 --cmd "$train_cmd" \
$working_dir/data_bnf/${set} $working_dir/data/${set} $working_dir $working_dir/_log $working_dir/_bnf || exit 1
# We will normalize BNF features, so we are not passing --fake here. Intuitively, applying CMN over BNF features
# might be redundant, but our experiments on WSJ show gains from doing this.
steps/compute_cmvn_stats.sh \
$working_dir/data_bnf/${set} $working_dir/_log $working_dir/_bnf || exit 1;
fi
done
for set in dev test; do
if [ ! -d $working_dir/data_bnf/${set} ]; then
steps_pdnn/make_bnf_feat.sh --nj 8 --cmd "$train_cmd" \
$working_dir/data_bnf/${set} $working_dir/data/${set} $working_dir $working_dir/_log $working_dir/_bnf || exit 1
# We will normalize BNF features, so we are not passing --fake here. Intuitively, applying CMN over BNF features
# might be redundant, but our experiments on WSJ show gains from doing this.
steps/compute_cmvn_stats.sh \
$working_dir/data_bnf/${set} $working_dir/_log $working_dir/_bnf || exit 1;
fi
done
# Redirect datadir to point to the BNF dir
datadir=$working_dir/data_bnf
echo =====================================================================
echo " LDA+MLLT Systems over BNFs "
echo =====================================================================
decode_param="--beam 15.0 --lattice-beam 7.0 --acwt 0.04" # decoding parameters differ from MFCC systems
scoring_opts="--min-lmwt 26 --max-lmwt 34"
denlats_param="--acwt 0.05" # Parameters for lattice generation
# LDA+MLLT systems building and decoding
if [ ! -f $working_dir/lda.mllt.done ]; then
steps/train_lda_mllt.sh --cmd "$train_cmd" \
5000 100000 $datadir/train data/lang ${gmmdir}_ali $working_dir/tri4 || exit 1;
# graph_dir=$working_dir/tri4/graph
# $decode_cmd $graph_dir/mkgraph.log \
# utils/mkgraph.sh data/lang_test ${working_dir}/tri4 $graph_dir || exit 1;
# steps/decode.sh --nj 8 --cmd "$decode_cmd" $decode_param --scoring-opts "$scoring_opts" \
# $graph_dir $datadir/dev ${working_dir}/tri4/decode_dev || exit 1;
# steps/decode.sh --nj 11 --cmd "$decode_cmd" $decode_param --scoring-opts "$scoring_opts" \
# $graph_dir $datadir/test ${working_dir}/tri4/decode_test || exit 1;
graph_dir=$working_dir/tri4/graph_bd_tgpr
utils/mkgraph.sh data/lang_test_bd_tgpr ${working_dir}/tri4 $graph_dir || exit 1;
steps/decode.sh --nj 8 --cmd "$decode_cmd" $decode_param --scoring-opts "$scoring_opts" \
$graph_dir $datadir/dev ${working_dir}/tri4/decode_dev_bd_tgpr || exit 1;
steps/decode.sh --nj 11 --cmd "$decode_cmd" $decode_param --scoring-opts "$scoring_opts" \
$graph_dir $datadir/test ${working_dir}/tri4/decode_test_bd_tgpr || exit 1;
touch $working_dir/lda.mllt.done
fi
echo =====================================================================
echo " MMI Systems over BNFs "
echo =====================================================================
# MMI systems building and decoding
scoring_opts="--min-lmwt 18 --max-lmwt 28" # SGMM needs smaller lmwt
if [ ! -f $working_dir/mmi.done ]; then
# steps/align_si.sh --nj 30 --cmd "$train_cmd" \
# $datadir/train data/lang ${working_dir}/tri4 ${working_dir}/tri4_ali || exit 1;
# steps/make_denlats.sh --nj 30 --cmd "$decode_cmd" $denlats_param \
# $datadir/train data/lang ${working_dir}/tri4 ${working_dir}/tri4_denlats || exit 1;
# 4 iterations of MMI
num_mmi_iters=4
steps/train_mmi.sh --cmd "$train_cmd" --boost 0.1 --num-iters $num_mmi_iters \
$datadir/train data/lang $working_dir/tri4_{ali,denlats} $working_dir/tri4_mmi_b0.1 || exit 1;
for iter in 1 2 3 4; do
graph_dir=$working_dir/tri4/graph_bd_tgpr
steps/decode.sh --nj 8 --cmd "$decode_cmd" $decode_param --scoring-opts "$scoring_opts" --iter $iter \
$graph_dir $datadir/dev ${working_dir}/tri4_mmi_b0.1/decode_dev_it${iter}_bd_tgpr || exit 1;
steps/decode.sh --nj 11 --cmd "$decode_cmd" $decode_param --scoring-opts "$scoring_opts" --iter $iter \
$graph_dir $datadir/test ${working_dir}/tri4_mmi_b0.1/decode_test_it${iter}_bd_tgpr || exit 1;
done
touch $working_dir/mmi.done
fi
echo =====================================================================
echo " SGMM Systems over BNFs "
echo =====================================================================
# SGMM system building and decoding
if [ ! -f $working_dir/sgmm.done ]; then
steps/train_ubm.sh --cmd "$train_cmd" \
700 $datadir/train data/lang ${working_dir}/tri4_ali ${working_dir}/ubm5 || exit 1;
steps/train_sgmm2.sh --cmd "$train_cmd" 10000 30000 \
$datadir/train data/lang ${working_dir}/tri4_ali ${working_dir}/ubm5/final.ubm ${working_dir}/sgmm5a || exit 1;
graph_dir=$working_dir/sgmm5a/graph
$decode_cmd $graph_dir/mkgraph.log \
utils/mkgraph.sh data/lang_test ${working_dir}/sgmm5a $graph_dir || exit 1;
steps/decode_sgmm2.sh --stage 7 --nj 8 --cmd "$decode_cmd" --acwt 0.04 --scoring-opts "$scoring_opts" \
$graph_dir $datadir/dev ${working_dir}/sgmm5a/decode_dev || exit 1;
steps/decode_sgmm2.sh --stage 7 --nj 11 --cmd "$decode_cmd" --acwt 0.04 --scoring-opts "$scoring_opts" \
$graph_dir $datadir/test ${working_dir}/sgmm5a/decode_test || exit 1;
touch $working_dir/sgmm.done
fi
echo =====================================================================
echo " MMI-SGMM over BNFs "
echo =====================================================================
# Now discriminatively train the SGMM system
if [ ! -f $working_dir/mmi.sgmm.done ]; then
steps/align_sgmm2.sh --nj 30 --cmd "$train_cmd" \
$datadir/train data/lang ${working_dir}/sgmm5a ${working_dir}/sgmm5a_ali || exit 1;
# Reduce the beam down to 10 to get acceptable decoding speed.
steps/make_denlats_sgmm2.sh --nj 30 --beam 9.0 --lattice-beam 6 --cmd "$decode_cmd" $denlats_param \
$datadir/train data/lang ${working_dir}/sgmm5a ${working_dir}/sgmm5a_denlats || exit 1;
steps/train_mmi_sgmm2.sh --cmd "$decode_cmd" --boost 0.1 \
$datadir/train data/lang $working_dir/sgmm5a_{ali,denlats} ${working_dir}/sgmm5a_mmi_b0.1 || exit 1;
for iter in 1 2 3 4; do
steps/decode_sgmm2_rescore.sh --cmd "$decode_cmd" --iter $iter \
data/lang_test $datadir/dev ${working_dir}/sgmm5a/decode_dev ${working_dir}/sgmm5a_mmi_b0.1/decode_dev_it$iter || exit 1;
steps/decode_sgmm2_rescore.sh --cmd "$decode_cmd" --iter $iter \
data/lang_test $datadir/test ${working_dir}/sgmm5a/decode_test ${working_dir}/sgmm5a_mmi_b0.1/decode_test_it$iter || exit 1;
done
touch $working_dir/mmi.sgmm.done
fi
echo "Finish !!"
|
weiwchu/kaldipdnn
|
run_tedlium/run-bnf-tandem-fbank.sh
|
Shell
|
apache-2.0
| 13,457 |
wget --quiet \
--method POST \
--header 'content-type: application/x-www-form-urlencoded' \
--body-data 'foo=bar&hello=world' \
--output-document \
- http://mockbin.com/har
|
postmanlabs/httpsnippet
|
test/fixtures/output/shell/wget/application-form-encoded.sh
|
Shell
|
mit
| 183 |
#!/bin/sh
set -x
#. ../tools/error_handler
#trap 'error ${LINENO}' ERR
target=${TARGET-/kb/runtime}
if [[ $# -gt 0 ]] ; then
target=$1
shift
fi
opts=""
if [[ -x /usr/libexec/java_home ]] ; then
if /usr/libexec/java_home ; then
export JAVA_HOME=`/usr/libexec/java_home`
else
opts="$opts --without-java"
fi
opts="$opts --without-ruby"
opts="$opts --without-php"
elif [[ -d /Library/Java/Home ]] ; then
export JAVA_HOME=/Library/Java/Home
opts="$opts --without-ruby"
else
export JAVA_HOME=$runtime/java
fi
export ANT_HOME=$target/ant
export THRIFT_HOME=$target/thrift
export PATH=${JAVA_HOME}/bin:${ANT_HOME}/bin:$target/bin:${THRIFT_HOME}/bin:${PATH}
export PY_PREFIX=$target
#vers=0.8.0
#url=http://www.kbase.us/docs/build/thrift-$vers.tar.gz
vers=0.9.1
url=http://apache.spinellicreations.com/thrift/$vers/thrift-$vers.tar.gz
tar=thrift-$vers.tar.gz
curl -o $tar -L $url
rm -rf thrift-$vers
tar zxf $tar
cd thrift-$vers
./configure --prefix=$target/thrift-$vers $opts
make
make install
rm -f $target/thrift
ln -s $target/thrift-$vers $target/thrift
|
kbase/bootstrap
|
kb_thrift_runtime/thrift_build.sh
|
Shell
|
mit
| 1,074 |
#!/bin/bash
FN="cancerdata_1.30.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.13/data/experiment/src/contrib/cancerdata_1.30.0.tar.gz"
"https://bioarchive.galaxyproject.org/cancerdata_1.30.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-cancerdata/bioconductor-cancerdata_1.30.0_src_all.tar.gz"
)
MD5="0664ae0c1510bd3238833227dfb8b485"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
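# On Linux, 'md5sum -c' verifies a "<md5>  <file>" line; on macOS, 'md5 <file>'
# prints "MD5 (<file>) = <md5>", hence the hash is taken from the 4th field.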
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
phac-nml/bioconda-recipes
|
recipes/bioconductor-cancerdata/post-link.sh
|
Shell
|
mit
| 1,302 |
#!/bin/bash
case "$1" in
opteron) echo "running tests on opteron"
THE_LOCKS="HCLH TTAS ARRAY MCS TICKET HTICKET MUTEX SPINLOCK CLH"
num_cores=48
platform_def="-DOPTERON"
make="make"
freq=2100000000
platform=opteron
prog_prefix="numactl --physcpubind=0 ../"
;;
opteron_optimize) echo "running tests on opteron"
THE_LOCKS="HCLH TTAS ARRAY MCS TICKET HTICKET MUTEX SPINLOCK CLH"
num_cores=48
optimize="-DOPTERON_OPTIMIZE"
platform_def="-DOPTERON"
make="make"
freq=2100000000
platform=opteron
prog_prefix="numactl --physcpubind=0 ../"
;;
xeon) echo "running tests on xeon"
THE_LOCKS="HCLH TTAS ARRAY MCS TICKET HTICKET MUTEX SPINLOCK CLH"
num_cores=80
platform_def="-DXEON"
freq=2130000000
make="make"
platform=xeon
prog_prefix="numactl --physcpubind=1 ../"
;;
niagara) echo "running tests on niagara"
THE_LOCKS="TTAS ARRAY MCS TICKET MUTEX SPINLOCK CLH"
ALTERNATE=-DALTERNATE_SOCKETS
num_cores=64
platform_def="-DSPARC"
freq=1200000000
make="make"
platform=niagara
prog_prefix="../"
;;
tilera) echo "running tests on tilera"
THE_LOCKS="TTAS ARRAY MCS TICKET MUTEX SPINLOCK CLH"
num_cores=36
platform_def="-DTILERA"
freq=1200000000
make="make"
platform=tilera
prog_prefix="../run ../"
;;
*) echo "Program format ./run_all platform, where plafrom in opteron, xeon, niagara, tilera"
exit;
;;
esac
rm correctness.out
for prefix in ${THE_LOCKS}
do
cd ..; LOCK_VERSION=-DUSE_${prefix}_LOCKS PRIMITIVE=-DTEST_CAS OPTIMIZE=${optimize} PLATFORM=${platform_def} ${make} clean all; cd scripts;
echo ${prefix} >> correctness.out
${prog_prefix}test_correctness -n ${num_cores} -d 1000 >> correctness.out
done
|
dailypips/libslock
|
scripts/correctness.sh
|
Shell
|
mit
| 1,748 |
#!/bin/sh
set -e
case "$(uname)" in
Darwin)
LIBTOOLIZE=${LIBTOOLIZE:-glibtoolize}
;;
*)
LIBTOOLIZE=${LIBTOOLIZE:-libtoolize}
;;
esac
AUTORECONF=${AUTORECONF:-autoreconf}
ACLOCAL=${ACLOCAL:-aclocal}
AUTOCONF=${AUTOCONF:-autoconf}
AUTOHEADER=${AUTOHEADER:-autoheader}
AUTOMAKE=${AUTOMAKE:-automake}
# Check we have all tools installed
check_command() {
command -v "${1}" > /dev/null 2>&1 || {
>&2 echo "autogen.sh: could not find \`$1'. \`$1' is required to run autogen.sh."
exit 1
}
}
check_command "$LIBTOOLIZE"
check_command "$AUTORECONF"
check_command "$ACLOCAL"
check_command "$AUTOCONF"
check_command "$AUTOHEADER"
check_command "$AUTOMAKE"
# Absence of pkg-config or misconfiguration can make some odd error
# messages, we check if it is installed correctly. See:
# https://blogs.oracle.com/mandy/entry/autoconf_weirdness
#
# We cannot just check for pkg-config command, we need to check for
# PKG_* macros. The pkg-config command can be defined in ./configure,
# we cannot tell anything when not present.
check_pkg_config() {
grep -q '^AC_DEFUN.*PKG_CHECK_MODULES' aclocal.m4 || {
cat <<EOF >&2
autogen.sh: could not find PKG_CHECK_MODULES macro.
Either pkg-config is not installed on your system or
\`pkg.m4' is missing or not found by aclocal.
If \`pkg.m4' is installed at an unusual location, re-run
\`autogen.sh' by setting \`ACLOCAL_FLAGS':
ACLOCAL_FLAGS="-I <prefix>/share/aclocal" ./autogen.sh
EOF
exit 1
}
}
echo "autogen.sh: reconfigure with autoreconf"
${AUTORECONF} -vif -I m4 || {
echo "autogen.sh: autoreconf has failed ($?), let's do it manually"
[ -f ./configure.ac ] || [ -f ./configure.in ] || exit 1
echo "autogen.sh: configure `basename $PWD`"
${ACLOCAL} -I m4 ${ACLOCAL_FLAGS}
check_pkg_config
${LIBTOOLIZE} --automake --copy --force
${ACLOCAL} -I m4 ${ACLOCAL_FLAGS}
${AUTOCONF} --force
${AUTOHEADER}
${AUTOMAKE} --add-missing --copy --force-missing
}
bin/configure-help-replace.sh
echo "autogen.sh: for the next step, run './configure' [or './configure --help' to check available options]"
exit 0
|
vanzylad/pmacct-static-analyser-fixes
|
autogen.sh
|
Shell
|
gpl-2.0
| 2,180 |
# 96_collect_MC_serviceguard_infos.sh
# Purpose of this script is to gather MC/SG related config files
# in order to prepare a smooth rolling upgrade
# List files and directories in SGLX_FILES
SGLX_FILES="/etc/hostname
/etc/vconsole.conf
/etc/locale.conf
/etc/sysconfig/keyboard
/etc/sysconfig/network-scripts/ifcfg*
/etc/sysconfig/network/ifcfg*
/etc/sysconfig/network
/etc/hosts
/etc/modprobe.conf
/etc/modules.conf
/etc/cmcluster.conf
/etc/hp_qla2x00.conf
/etc/lpfc.conf
/etc/ntp.conf
/etc/resolv.conf
/usr/local/cmcluster/conf/*/*
/opt/cmcluster/conf/*/*"
# Phase 1: is the SGLX software installed?
# on RH the path is /usr/local/cmcluster; on SUSE it is /opt/cmcluster
[ -d /usr/local/cmcluster/conf -o -d /opt/cmcluster/conf ] || return
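# ('return' rather than 'exit' because this snippet is sourced by the ReaR framework)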
# Phase 2: create a /etc/rear/recovery/sglx directory
mkdir -p $v -m755 "$VAR_DIR/recovery/sglx" >&2
StopIfError "Could not create sglx configuration directory: $VAR_DIR/recovery/sglx"
SGLX_DIR="$VAR_DIR/recovery/sglx"
for sgf in $SGLX_FILES
do
if [ `dirname ${sgf}` != . ]; then
mkdir -p $v ${SGLX_DIR}/`dirname ${sgf}` >&2
fi
if [ -f ${sgf} ]; then
cp $v ${sgf} ${SGLX_DIR}${sgf} >&2
fi
done
|
krissi/rear
|
usr/share/rear/rescue/GNU/Linux/96_collect_MC_serviceguard_infos.sh
|
Shell
|
gpl-2.0
| 1,146 |
# $Id$
#
# rc-addon-script for plugin audiorecorder
#
# Matthias Schwarzott <[email protected]>
: ${AUDIORECORDER_DIR:=/var/vdr/audiorecorder}
plugin_pre_vdr_start() {
add_plugin_param "--recdir=${AUDIORECORDER_DIR}"
add_plugin_param "--debug=0"
}
|
DmitriyHetman/gentoo
|
media-plugins/vdr-audiorecorder/files/rc-addon.sh
|
Shell
|
gpl-3.0
| 251 |
#!/bin/bash
heat stack-create $1 -f bundle-trusty-lamp.heat.yml
|
cloudwatt/applications
|
bundle-trusty-lamp/stack-start.sh
|
Shell
|
gpl-3.0
| 65 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# kubernetes-e2e-{gce, gke, gke-ci} jobs: This script is triggered by
# the kubernetes-build job, or runs every half hour. We abort this job
# if it takes more than 75m. As of initial commit, it typically runs
# in about half an hour.
#
# The "Workspace Cleanup Plugin" is installed and in use for this job,
# so the ${WORKSPACE} directory (the current directory) is currently
# empty.
set -o errexit
set -o nounset
set -o pipefail
set -o xtrace
if [[ "${CIRCLECI:-}" == "true" ]]; then
JOB_NAME="circleci-${CIRCLE_PROJECT_USERNAME}-${CIRCLE_PROJECT_REPONAME}"
BUILD_NUMBER=${CIRCLE_BUILD_NUM}
WORKSPACE=`pwd`
else
# Jenkins?
export HOME=${WORKSPACE} # Nothing should want Jenkins $HOME
fi
# Additional parameters that are passed to ginkgo runner.
GINKGO_TEST_ARGS=""
if [[ "${PERFORMANCE:-}" == "true" ]]; then
if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
export MASTER_SIZE="m3.xlarge"
else
export MASTER_SIZE="n1-standard-4"
fi
export NUM_MINIONS="100"
GINKGO_TEST_ARGS="--ginkgo.focus=\[Performance suite\] "
else
if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
export MASTER_SIZE="t2.small"
else
export MASTER_SIZE="g1-small"
fi
export NUM_MINIONS="2"
fi
# Unlike the kubernetes-build script, we expect some environment
# variables to be set. We echo these immediately and presume "set -o
# nounset" will force the caller to set them: (The first several are
# Jenkins variables.)
echo "JOB_NAME: ${JOB_NAME}"
echo "BUILD_NUMBER: ${BUILD_NUMBER}"
echo "WORKSPACE: ${WORKSPACE}"
echo "KUBERNETES_PROVIDER: ${KUBERNETES_PROVIDER}" # Cloud provider
echo "E2E_CLUSTER_NAME: ${E2E_CLUSTER_NAME}" # Name of the cluster (e.g. "e2e-test-jenkins")
echo "E2E_NETWORK: ${E2E_NETWORK}" # Name of the network (e.g. "e2e")
echo "E2E_ZONE: ${E2E_ZONE}" # Name of the GCE zone (e.g. "us-central1-f")
echo "E2E_OPT: ${E2E_OPT}" # hack/e2e.go options
echo "E2E_SET_CLUSTER_API_VERSION: ${E2E_SET_CLUSTER_API_VERSION:-<not set>}" # optional, for GKE, set CLUSTER_API_VERSION to git hash
echo "--------------------------------------------------------------------------------"
# AWS variables
export KUBE_AWS_INSTANCE_PREFIX=${E2E_CLUSTER_NAME}
export KUBE_AWS_ZONE=${E2E_ZONE}
# GCE variables
export INSTANCE_PREFIX=${E2E_CLUSTER_NAME}
export KUBE_GCE_ZONE=${E2E_ZONE}
export KUBE_GCE_NETWORK=${E2E_NETWORK}
# GKE variables
export CLUSTER_NAME=${E2E_CLUSTER_NAME}
export ZONE=${E2E_ZONE}
export KUBE_GKE_NETWORK=${E2E_NETWORK}
export PATH=${PATH}:/usr/local/go/bin
export KUBE_SKIP_CONFIRMATIONS=y
# E2E Control Variables
export E2E_UP="${E2E_UP:-true}"
export E2E_TEST="${E2E_TEST:-true}"
export E2E_DOWN="${E2E_DOWN:-true}"
if [[ "${E2E_UP,,}" == "true" ]]; then
if [[ ${KUBE_RUN_FROM_OUTPUT:-} =~ ^[yY]$ ]]; then
echo "Found KUBE_RUN_FROM_OUTPUT=y; will use binaries from _output"
cp _output/release-tars/kubernetes*.tar.gz .
else
echo "Pulling binaries from GCS"
if [[ $(find . | wc -l) != 1 ]]; then
echo $PWD not empty, bailing!
exit 1
fi
# Tell kube-up.sh to skip the update, it doesn't lock. An internal
# gcloud bug can cause racing component updates to stomp on each
# other.
export KUBE_SKIP_UPDATE=y
sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update -q" || true
# For GKE, we can get the server-specified version.
if [[ ${JENKINS_USE_SERVER_VERSION:-} =~ ^[yY]$ ]]; then
# We'll pull our TARs for tests from the release bucket.
bucket="release"
# Get the latest available API version from the GKE apiserver.
# Trim whitespace out of the error message. This gives us something
# like: ERROR:(gcloud.alpha.container.clusters.create)ResponseError:
# code=400,message=cluster.cluster_api_versionmustbeoneof:
# 0.15.0,0.16.0.
# The command should error, so we throw an || true on there.
msg=$(gcloud alpha container clusters create this-wont-work \
--zone=us-central1-f --cluster-api-version=0.0.0 2>&1 \
| tr -d '[[:space:]]') || true
# Strip out everything before the final colon, which gives us just
# the allowed versions; something like "0.15.0,0.16.0." or "0.16.0."
msg=${msg##*:}
# Take off the final period, which gives us just comma-separated
# allowed versions; something like "0.15.0,0.16.0" or "0.16.0"
msg=${msg%%\.}
# Split the version string by comma and read into an array, using
# the last element as the githash, which will be like "v0.16.0".
IFS=',' read -a varr <<< "${msg}"
githash="v${varr[${#varr[@]} - 1]}"
else
# The "ci" bucket is for builds like "v0.15.0-468-gfa648c1"
bucket="ci"
# The "latest" version picks the most recent "ci" or "release" build.
version_file="latest"
if [[ ${JENKINS_USE_RELEASE_TARS:-} =~ ^[yY]$ ]]; then
# The "release" bucket is for builds like "v0.15.0"
bucket="release"
if [[ ${JENKINS_USE_STABLE:-} =~ ^[yY]$ ]]; then
# The "stable" version picks the most recent "release" build.
version_file="stable"
fi
fi
githash=$(gsutil cat gs://kubernetes-release/${bucket}/${version_file}.txt)
fi
# At this point, we want to have the following vars set:
# - bucket
# - githash
gsutil -m cp gs://kubernetes-release/${bucket}/${githash}/kubernetes.tar.gz gs://kubernetes-release/${bucket}/${githash}/kubernetes-test.tar.gz .
fi
if [[ ! "${CIRCLECI:-}" == "true" ]]; then
# Copy GCE keys so we don't keep cycling them.
# To set this up, you must know the <project>, <zone>, and <instance> that
# on which your jenkins jobs are running. Then do:
#
# # Get into the instance.
# $ gcloud compute ssh --project="<prj>" ssh --zone="<zone>" <instance>
#
# # Generate a key by ssh'ing into itself, then exit.
# $ gcloud compute ssh --project="<prj>" ssh --zone="<zone>" <instance>
# $ ^D
#
# # Copy the keys to the desired location, e.g. /var/lib/jenkins/gce_keys/
# $ sudo mkdir -p /var/lib/jenkins/gce_keys/
# $ sudo cp ~/.ssh/google_compute_engine /var/lib/jenkins/gce_keys/
# $ sudo cp ~/.ssh/google_compute_engine.pub /var/lib/jenkins/gce_keys/
#
# Move the permissions to jenkins.
# $ sudo chown -R jenkins /var/lib/jenkins/gce_keys/
# $ sudo chgrp -R jenkins /var/lib/jenkins/gce_keys/
if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
echo "Skipping SSH key copying for AWS"
else
mkdir -p ${WORKSPACE}/.ssh/
cp /var/lib/jenkins/gce_keys/google_compute_engine ${WORKSPACE}/.ssh/
cp /var/lib/jenkins/gce_keys/google_compute_engine.pub ${WORKSPACE}/.ssh/
fi
fi
md5sum kubernetes*.tar.gz
tar -xzf kubernetes.tar.gz
tar -xzf kubernetes-test.tar.gz
# Set by GKE-CI to change the CLUSTER_API_VERSION to the git version
if [[ ! -z ${E2E_SET_CLUSTER_API_VERSION:-} ]]; then
export CLUSTER_API_VERSION=$(echo ${githash} | cut -c 2-)
elif [[ ${JENKINS_USE_RELEASE_TARS:-} =~ ^[yY]$ ]]; then
release=$(gsutil cat gs://kubernetes-release/release/${version_file}.txt | cut -c 2-)
export CLUSTER_API_VERSION=${release}
fi
fi
cd kubernetes
# Have cmd/e2e run by goe2e.sh generate JUnit report in ${WORKSPACE}/junit*.xml
export E2E_REPORT_DIR=${WORKSPACE}
### Set up ###
if [[ "${E2E_UP,,}" == "true" ]]; then
go run ./hack/e2e.go ${E2E_OPT} -v --down
go run ./hack/e2e.go ${E2E_OPT} -v --up
go run ./hack/e2e.go -v --ctl="version --match-server-version=false"
fi
### Run tests ###
# Jenkins will look at the junit*.xml files for test failures, so don't exit
# with a nonzero error code if it was only tests that failed.
if [[ "${E2E_TEST,,}" == "true" ]]; then
go run ./hack/e2e.go ${E2E_OPT} -v --test --test_args="${GINKGO_TEST_ARGS}--ginkgo.noColor" || true
fi
### Clean up ###
if [[ "${E2E_DOWN,,}" == "true" ]]; then
go run ./hack/e2e.go ${E2E_OPT} -v --down
fi
|
bcbroussard/kubernetes
|
hack/jenkins/e2e.sh
|
Shell
|
apache-2.0
| 9,160 |
#!/bin/sh -f
# things to do for travis-ci in the before_install section
if ( test "`uname -s`" = "Darwin" )
then
#cmake v2.8.12 is installed on the Mac workers now
#brew update
#brew install cmake
echo
else
#install a newer cmake since at this time Travis only has version 2.8.7
sudo add-apt-repository --yes ppa:kalakris/cmake
sudo apt-get update -qq
sudo apt-get install cmake
fi
|
avalentino/PyTables
|
hdf5-blosc/travis-before-install.sh
|
Shell
|
bsd-3-clause
| 399 |
#!/bin/sh
# Automate the compilation of the various locale PO files by generating
# them nightly.
#
projname=boinctrunk
projdir=/home/boincadm/pootle/po/$projname
cd $projdir
# Update anything that needs updating
svn update
# Iterate through the various PO files looking for those that need to be added to SVN.
#
for file in `find -name 'BOINC-Manager.po'` ; do
dir=`dirname $file`
locale=`basename $dir`
template_name=${projdir}/${locale}/BOINC-Manager
# Add any missing PO files to SVN
svn add ${template_name}.po > /dev/null 2> /dev/null
svn propset svn:mime-type 'text/plain;charset=UTF-8' ${template_name}.po > /dev/null 2> /dev/null
done
# Iterate through the various PO files looking for those that need to be added to SVN.
#
for file in `find -name 'BOINC-Client.po'` ; do
dir=`dirname $file`
locale=`basename $dir`
template_name=${projdir}/${locale}/BOINC-Client
# Add any missing PO files to SVN
svn add ${template_name}.po > /dev/null 2> /dev/null
svn propset svn:mime-type 'text/plain;charset=UTF-8' ${template_name}.po > /dev/null 2> /dev/null
done
# Iterate through the various PO files looking for those that need to be added to SVN.
#
for file in `find -name 'BOINC-Web.po'` ; do
dir=`dirname $file`
locale=`basename $dir`
template_name=${projdir}/${locale}/BOINC-Web
# Add any missing PO files to SVN
svn add ${template_name}.po > /dev/null 2> /dev/null
svn propset svn:mime-type 'text/plain;charset=UTF-8' ${template_name}.po > /dev/null 2> /dev/null
done
# Iterate through the various PO files looking for those that need to be compiled.
#
for file in `find -name 'BOINC-Manager.po'` ; do
dir=`dirname $file`
locale=`basename $dir`
template_name=${projdir}/${locale}/BOINC-Manager
if test ${template_name}.po -nt ${template_name}.mo
then
# Compile the PO file into an MO file.
pocompile ${template_name}.po ${template_name}.mo
# Add any new MO files to SVN
svn add ${template_name}.mo > /dev/null 2> /dev/null
# Touch each file to adjust timestamps
touch ${template_name}.po
touch ${template_name}.mo
fi
done
# Iterate through the various PO files looking for those that need to be compiled.
#
for file in `find -name 'BOINC-Client.po'` ; do
dir=`dirname $file`
locale=`basename $dir`
template_name=${projdir}/${locale}/BOINC-Client
if test ${template_name}.po -nt ${template_name}.mo
then
# Compile the PO file into an MO file.
pocompile ${template_name}.po ${template_name}.mo > /dev/null 2> /dev/null
# Add any new MO files to SVN
svn add ${template_name}.mo > /dev/null 2> /dev/null
# Touch each file to adjust timestamps
touch ${template_name}.po
touch ${template_name}.mo
fi
done
# Iterate through the various PO files looking for those that need to be compiled.
#
for file in `find -name 'BOINC-Web.po'` ; do
dir=`dirname $file`
locale=`basename $dir`
template_name=${projdir}/${locale}/BOINC-Web
if test ${template_name}.po -nt ${template_name}.mo
then
# Compile the PO file into an MO file.
pocompile ${template_name}.po ${template_name}.mo > /dev/null 2> /dev/null
# Add any new MO files to SVN
svn add ${template_name}.mo > /dev/null 2> /dev/null
# Touch each file to adjust timestamps
touch ${template_name}.po
touch ${template_name}.mo
fi
done
# Determine if we need to update the various languages using the templates.
# This is done by means of a flag file whose timestamp should match the
# corresponding template file's. If the timestamps do not match, update all
# languages.
for file in `find -name '*.pot'` ; do
template_rootname=`basename $file .pot`
template_name=${projdir}/templates/${template_rootname}
# Check to see if the file exists, if not create it
if test ! -e ${template_name}.flag
then
cp ${template_name}.pot ${template_name}.flag
fi
# If the modification timestamps don't match then update all the languages
if test ${template_name}.pot -nt ${template_name}.flag
then
execute_update=true
fi
done
if test "${execute_update}" = "true"
then
for file in `find -name '*.po'` ; do
dir=`dirname $file`
locale=`basename $dir`
po_name=`basename $file .po`
msgmerge --update ${locale}/${po_name}.po templates/${po_name}.pot
done
fi
for file in `find -name '*.pot'` ; do
template_rootname=`basename $file .pot`
template_name=${projdir}/templates/${template_rootname}
# Touch each file to adjust timestamps
touch ${template_name}.pot
touch ${template_name}.flag
done
# Commit any changes to SVN
svn commit -m 'Update Translations'
exit 0
|
zonca/boinc
|
locale/updatetrans.sh
|
Shell
|
gpl-3.0
| 4,702 |
#!/bin/bash
command="$*"
echo ${command}
# launch the command in the background
eval ${command} &
# get the PID
PID=$!
memlogfile="mem_evolution_${PID}.log"
echo "#memory evolution of process ${PID}; columns indicate active children" > ${memlogfile}
echo "#command line: ${command}" >> ${memlogfile}
child_pid_list=
# finds all the (recursive) child processes starting from a parent
# output includes the parent
# output is saved in child_pid_list
childprocs() {
local parent=$1
if [ "$parent" ] ; then
child_pid_list="$child_pid_list $parent"
for childpid in $(pgrep -P ${parent}); do
childprocs $childpid
done;
fi
}
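# Example (PIDs are hypothetical): "childprocs 1234" appends 1234 and every
# descendant, e.g. child_pid_list="1234 5678 5690".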
# while this PID exists we sample
while [ 1 ] ; do
child_pid_list=
childprocs ${PID}
# sum up memory from all child processes
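# Pss (proportional set size) from /proc/<pid>/smaps splits the cost of shared pages
# among the processes mapping them, so summing it avoids double counting; the awk
# divides by 1024 to report megabytes.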
mem=`for pid in $child_pid_list; do cat /proc/$pid/smaps 2>/dev/null | awk -v pid=$pid '/Pss/{mem+=$2} END {print mem/1024.}'; done | tr '\n' ' '`
echo "${mem}" >> ${memlogfile}
# check if the job is still there
ps -p $PID > /dev/null
[ $? == 1 ] && echo "Job finished; Exiting " && break
# take a measurement at some defined rate (can be customized)
sleep ${JOBUTILS_MONITORMEM_SLEEP:-0.005}
done
# record return code of original command
wait ${PID}
RC=$?
# print summary
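# The first awk sums the per-child columns of every sample into a single total;
# the second awk then takes the maximum (respectively the average) over all samples.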
MAXMEM=`awk '/^[0-9]/{ for(i=1; i<=NF;i++) j+=$i; print j; j=0 }' ${memlogfile} | awk 'BEGIN {m = 0} //{if($1>m){m=$1}} END {print m}'`
AVGMEM=`awk '/^[0-9]/{ for(i=1; i<=NF;i++) j+=$i; print j; j=0 }' ${memlogfile} | awk 'BEGIN {a = 0;c = 0} //{c=c+1;a=a+$1} END {print a/c}'`
echo "PROCESS MAX MEM = ${MAXMEM}"
echo "PROCESS AVG MEM = ${AVGMEM}"
exit ${RC}
|
noferini/AliceO2
|
Utilities/Tools/monitor-mem.sh
|
Shell
|
gpl-3.0
| 1,617 |
#!/bin/bash
echo "dockersh installer - installs prebuilt dockersh binary"
echo ""
echo "To install dockersh"
echo " docker run -v /usr/local/bin:/target thiscontainer"
echo "If you're using the publicly available (built from source) container, this is:"
echo " docker run -v /usr/local/bin:/target yelp/dockersh"
echo ""
if [ -d "/target" ]; then
echo "GOING TO DO INSTALL IN 5 SECONDS, Ctrl-C to abort"
sleep 5
rm -f /target/dockersh
cp -a /dockersh /target/dockersh
else
echo "No /target directory found, not installing"
fi
|
mehulsbhatt/dockersh
|
installer.sh
|
Shell
|
apache-2.0
| 542 |
#!/bin/bash
FN="xlaevis2cdf_2.18.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.14/data/annotation/src/contrib/xlaevis2cdf_2.18.0.tar.gz"
"https://bioarchive.galaxyproject.org/xlaevis2cdf_2.18.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-xlaevis2cdf/bioconductor-xlaevis2cdf_2.18.0_src_all.tar.gz"
)
MD5="9e4a80d66397299b4e66a8d6715ca4aa"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
cokelaer/bioconda-recipes
|
recipes/bioconductor-xlaevis2cdf/post-link.sh
|
Shell
|
mit
| 1,307 |
#!/usr/bin/env bash
. "test/testlib.sh"
reponame="submodule-test-repo"
submodname="submodule-test-submodule"
begin_test "submodule local git dir"
(
set -e
setup_remote_repo "$reponame"
setup_remote_repo "$submodname"
clone_repo "$submodname" submod
mkdir dir
echo "sub module" > dir/README
git add dir/README
git commit -a -m "submodule readme"
git push origin master
clone_repo "$reponame" repo
git submodule add "$GITSERVER/$submodname" sub
git submodule update
git add .gitmodules sub
git commit -m "add submodule"
git push origin master
grep "sub module" sub/dir/README || {
echo "submodule not setup correctly?"
cat sub/dir/README
exit 1
}
)
end_test
begin_test "submodule env"
(
set -e
# using the local clone from the above test
cd repo
git lfs env | tee env.log
grep "Endpoint=$GITSERVER/$reponame.git/info/lfs (auth=none)$" env.log
grep "LocalWorkingDir=$(native_path_escaped "$TRASHDIR/repo$")" env.log
grep "LocalGitDir=$(native_path_escaped "$TRASHDIR/repo/.git$")" env.log
grep "LocalGitStorageDir=$(native_path_escaped "$TRASHDIR/repo/.git$")" env.log
grep "LocalMediaDir=$(native_path_escaped "$TRASHDIR/repo/.git/lfs/objects$")" env.log
grep "TempDir=$(native_path_escaped "$TRASHDIR/repo/.git/lfs/tmp$")" env.log
cd .git
echo "./.git"
git lfs env | tee env.log
cat env.log
grep "Endpoint=$GITSERVER/$reponame.git/info/lfs (auth=none)$" env.log
grep "LocalWorkingDir=$" env.log
grep "LocalGitDir=$(native_path_escaped "$TRASHDIR/repo/.git$")" env.log
grep "LocalGitStorageDir=$(native_path_escaped "$TRASHDIR/repo/.git$")" env.log
grep "LocalMediaDir=$(native_path_escaped "$TRASHDIR/repo/.git/lfs/objects$")" env.log
grep "TempDir=$(native_path_escaped "$TRASHDIR/repo/.git/lfs/tmp$")" env.log
cd ../sub
echo "./sub"
git lfs env | tee env.log
grep "Endpoint=$GITSERVER/$submodname.git/info/lfs (auth=none)$" env.log
grep "LocalWorkingDir=$(native_path_escaped "$TRASHDIR/repo/sub$")" env.log
grep "LocalGitDir=$(native_path_escaped "$TRASHDIR/repo/.git/modules/sub$")" env.log
grep "LocalGitStorageDir=$(native_path_escaped "$TRASHDIR/repo/.git/modules/sub$")" env.log
grep "LocalMediaDir=$(native_path_escaped "$TRASHDIR/repo/.git/modules/sub/lfs/objects$")" env.log
grep "TempDir=$(native_path_escaped "$TRASHDIR/repo/.git/modules/sub/lfs/tmp$")" env.log
cd dir
echo "./sub/dir"
git lfs env | tee env.log
grep "Endpoint=$GITSERVER/$submodname.git/info/lfs (auth=none)$" env.log
grep "LocalWorkingDir=$(native_path_escaped "$TRASHDIR/repo/sub$")" env.log
grep "LocalGitDir=$(native_path_escaped "$TRASHDIR/repo/.git/modules/sub$")" env.log
grep "LocalGitStorageDir=$(native_path_escaped "$TRASHDIR/repo/.git/modules/sub$")" env.log
grep "LocalMediaDir=$(native_path_escaped "$TRASHDIR/repo/.git/modules/sub/lfs/objects$")" env.log
grep "TempDir=$(native_path_escaped "$TRASHDIR/repo/.git/modules/sub/lfs/tmp$")" env.log
)
end_test
|
Jericho25/-git-lfs_miilkyway
|
test/test-submodule.sh
|
Shell
|
mit
| 2,986 |
#!/bin/bash
set -e
function pushdq() { pushd "$1" > /dev/null; }
function popdq() { popd > /dev/null; }
function error() { echo "$*" >&2; }
BIN_DIR=$(dirname "$0")
ROOT_DIR="$BIN_DIR/.."
TMP_DIR="$ROOT_DIR/_tmp"
SITE_DIR="$ROOT_DIR/_site"
SASS_CACHE_DIR="$ROOT_DIR/.sass-cache"
DATACACHE_DIR="$TMP_DIR/datacache"
GITHUB_DIR="$TMP_DIR/restcache/github"
LANYRD_DIR="$TMP_DIR/lanyrd"
DEPLOY_REPO='[email protected]:arquillian/arquillian.github.com.git'
CLEAN=0
KEEP=0
PUSH=0
MESSAGE='manual publish'
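# Flags: -c clean cached data, -k keep the previously generated site (no forced
# regeneration), -p push the result to origin/master, -m commit message,
# -r path to the deploy repository clone.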
while getopts "ckpm:r:" option
do
case $option in
c) CLEAN=1 ;;
k) KEEP=1 ;;
p) PUSH=1 ;;
m) MESSAGE=$OPTARG ;;
r) DEPLOY_DIR=$OPTARG ;;
esac
done
if [ -z $DEPLOY_DIR ]; then
DEPLOY_DIR="$ROOT_DIR/_deploy"
if [[ ! -d "$DEPLOY_DIR/.git" ]]; then
error "Specify the path to the clone of $DEPLOY_REPO"
exit 1
fi
else
if [[ ! -d "$DEPLOY_DIR/.git" ]]; then
error "Not a git repository: $DEPLOY_DIR"
exit 1
fi
fi
set -e
pushdq $DEPLOY_DIR
if ! git remote -v | grep -qF "$DEPLOY_REPO"; then
error "Not a $DEPLOY_REPO clone: $DEPLOY_DIR"
exit 1
fi
popdq
cd $ROOT_DIR
if [[ `git status -s | wc -l` -gt 0 ]]; then
error "Please commit these local changes before publishing:"
error `git status -s`
exit 1
fi
if [[ `git diff upstream/develop | wc -l` -gt 0 ]]; then
error "Please push these local changes before publishing:"
error `git log upstream/develop..`
exit 1
fi
# TODO check if github repository has been updated since the contributions file was written, then nuke the contributions file
#pushdq $GITHUB_DIR
#rm -f *-contributors.json
#popdq
if [ $CLEAN -eq 1 ]; then
pushdq $LANYRD_DIR
rm -f search-*.html
popdq
rm -rf $DATACACHE_DIR
fi
if [ $KEEP -eq 0 ]; then
#rm -rf $SITE_DIR
#rm -rf $SASS_CACHE_DIR
awestruct --force -g -P production
else
awestruct -P production -g
fi
pushdq $DEPLOY_DIR
git pull origin master
popdq
rsync -a --delete --exclude='.git' "$SITE_DIR/" "$DEPLOY_DIR/"
pushdq $DEPLOY_DIR
git add -A .
git commit -m "$MESSAGE"
if [ $PUSH -eq 1 ]; then
git push origin master
fi
popdq
exit 0
|
oscerd/arquillian.github.com
|
_bin/deploy-production.sh
|
Shell
|
apache-2.0
| 2,101 |
#!/bin/sh
../../bin/pyjsbuild $@ Toggle
|
minghuascode/pyj
|
examples/toggle/build.sh
|
Shell
|
apache-2.0
| 40 |
#!/bin/bash
#
##################################################################################################################
# Written to be used on 64-bit computers
# Author : Erik Dubois
# Website : http://www.erikdubois.be
##################################################################################################################
##################################################################################################################
#
# DO NOT JUST RUN THIS. EXAMINE AND JUDGE. RUN AT YOUR OWN RISK.
#
##################################################################################################################
package="atom-editor-bin"
command="atom"
#----------------------------------------------------------------------------------
# check whether the application is already installed; if not, install it with an AUR helper
if pacman -Qi $package &> /dev/null; then
echo "################################################################"
echo "################## "$package" is already installed"
echo "################################################################"
else
#checking which helper is installed
if pacman -Qi packer &> /dev/null; then
echo "Installing with packer"
packer -S --noedit $package
elif pacman -Qi pacaur &> /dev/null; then
echo "Installing with pacaur"
pacaur -S --noconfirm --noedit $package
elif pacman -Qi yaourt &> /dev/null; then
echo "Installing with yaourt"
yaourt -S --noconfirm $package
fi
# Just checking if installation was successful
if pacman -Qi $package &> /dev/null; then
echo "################################################################"
echo "######### "$package" has been installed"
echo "################################################################"
else
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo "!!!!!!!!! "$package" has NOT been installed"
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
fi
fi
|
erikdubois/AntergosGnome
|
installation/install-atom-editor-bin-v1.sh
|
Shell
|
gpl-2.0
| 2,007 |
. ./init.sh
mysql $M test -e "insert into t1 values(2, 200, 'aaaaaa')"
sleep 1
mysql $S2 -e "stop slave"
is_gtid_supported
if test $? = 1
then
mysql $S2 -e "change master to master_host='127.0.0.1', master_port=$S1P, master_user='rsandbox', master_password='rsandbox'"
else
mysql $S1 -e "reset master"
mysql $S2 -e "change master to master_host='127.0.0.1', master_port=$S1P, master_user='rsandbox', master_password='rsandbox', master_log_file='mysql-bin.000001', master_log_pos=4"
fi
mysql $S2 -e "start slave"
check_master $0 $S2P $S1P
masterha_master_switch --master_state=alive --interactive=0 --conf=$CONF --new_master_host=127.0.0.1 --new_master_port=$S1P > switch.log 2>&1
fail_if_zero $0 $?
masterha_master_switch --master_state=alive --interactive=0 --conf=mha_test_multi.cnf --new_master_host=127.0.0.1 --new_master_port=$S1P > switch.log 2>&1
fail_if_zero $0 $?
mysql $S1 -e "set global read_only=1"
masterha_master_switch --master_state=alive --interactive=0 --conf=$CONF --new_master_host=127.0.0.1 --new_master_port=$S1P > switch.log 2>&1
fail_if_zero $0 $?
masterha_master_switch --master_state=alive --interactive=0 --conf=mha_test_multi.cnf --new_master_host=127.0.0.1 --new_master_port=$S1P > switch.log 2>&1
fail_if_nonzero $0 $?
check_master $0 $S2P $S1P
mysql $S1 test -e "insert into t1 values(10000003, 300, 'bbbaaaaaaa');"
check_count $0 $MP 2
./check $0 3
|
ZuoGuocai/mha4mysql-manager
|
tests/t/t_online_3tier.sh
|
Shell
|
gpl-2.0
| 1,389 |
#!/bin/bash
# Bootstrap and exit if KOLLA_BOOTSTRAP variable is set. This catches all cases
# of the KOLLA_BOOTSTRAP variable being set, including empty.
if [[ "${!KOLLA_BOOTSTRAP[@]}" ]]; then
senlin-manage --config-file /etc/senlin/senlin.conf db_sync
exit 0
fi
|
mrangana/kolla
|
docker/senlin/senlin-api/extend_start.sh
|
Shell
|
apache-2.0
| 273 |
#!/bin/sh
# SUMMARY: Check that vmdk output format is generated
# LABELS: amd64
set -e
# Source libraries. Uncomment if needed/defined
#. "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
NAME=check
clean_up() {
rm -f ${NAME}*
}
trap clean_up EXIT
linuxkit build -format vmdk -name "${NAME}" ../test.yml
[ -f "${NAME}.vmdk" ] || exit 1
exit 0
|
JohnnyLeone/linuxkit
|
test/cases/000_build/000_formats/007_vmdk/test.sh
|
Shell
|
apache-2.0
| 348 |
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script that creates a Kubemark cluster for any given cloud provider.
set -o errexit
set -o nounset
set -o pipefail
TMP_ROOT="$(dirname "${BASH_SOURCE}")/../.."
KUBE_ROOT=$(readlink -e ${TMP_ROOT} 2> /dev/null || perl -MCwd -e 'print Cwd::abs_path shift' ${TMP_ROOT})
source "${KUBE_ROOT}/test/kubemark/skeleton/util.sh"
source "${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh"
source "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/util.sh"
source "${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh"
if [[ -f "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/startup.sh" ]] ; then
source "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/startup.sh"
fi
source "${KUBE_ROOT}/cluster/kubemark/util.sh"
# hack/lib/init.sh will overwrite ETCD_VERSION, if it is unset,
# with the default from hack/lib/etcd.sh.
# To avoid that, if it is empty, we set it to 'avoid-overwrite' and
# clean it up afterwards.
if [ -z "${ETCD_VERSION:-}" ]; then
ETCD_VERSION="avoid-overwrite"
fi
source "${KUBE_ROOT}/hack/lib/init.sh"
if [ "${ETCD_VERSION:-}" == "avoid-overwrite" ]; then
ETCD_VERSION=""
fi
KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"
KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark"
RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources"
# Generate a random 6-digit alphanumeric tag for the kubemark image.
# Used to uniquify image builds across different invocations of this script.
KUBEMARK_IMAGE_TAG=$(head /dev/urandom | tr -dc 'a-z0-9' | fold -w 6 | head -n 1)
# Write all environment variables that we need to pass to the kubemark master,
# locally to the file ${RESOURCE_DIRECTORY}/kubemark-master-env.sh.
function create-master-environment-file {
cat > "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" <<EOF
# Generic variables.
INSTANCE_PREFIX="${INSTANCE_PREFIX:-}"
SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-}"
EVENT_PD="${EVENT_PD:-}"
# Etcd related variables.
ETCD_IMAGE="${ETCD_IMAGE:-3.2.18-0}"
ETCD_VERSION="${ETCD_VERSION:-}"
# Controller-manager related variables.
CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-}"
ALLOCATE_NODE_CIDRS="${ALLOCATE_NODE_CIDRS:-}"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-}"
TERMINATED_POD_GC_THRESHOLD="${TERMINATED_POD_GC_THRESHOLD:-}"
# Scheduler related variables.
SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-}"
# Apiserver related variables.
APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-}"
STORAGE_MEDIA_TYPE="${STORAGE_MEDIA_TYPE:-}"
STORAGE_BACKEND="${STORAGE_BACKEND:-etcd3}"
ETCD_COMPACTION_INTERVAL_SEC="${ETCD_COMPACTION_INTERVAL_SEC:-}"
RUNTIME_CONFIG="${RUNTIME_CONFIG:-}"
NUM_NODES="${NUM_NODES:-}"
CUSTOM_ADMISSION_PLUGINS="${CUSTOM_ADMISSION_PLUGINS:-}"
FEATURE_GATES="${FEATURE_GATES:-}"
KUBE_APISERVER_REQUEST_TIMEOUT="${KUBE_APISERVER_REQUEST_TIMEOUT:-}"
ENABLE_APISERVER_ADVANCED_AUDIT="${ENABLE_APISERVER_ADVANCED_AUDIT:-}"
EOF
echo "Created the environment file for master."
}
# Generate certs/keys for CA, master, kubelet and kubecfg, and tokens for kubelet
# and kubeproxy.
function generate-pki-config {
kube::util::ensure-temp-dir
gen-kube-bearertoken
gen-kube-basicauth
create-certs ${MASTER_IP}
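  # Each token below is a random 32-character string: 128 random bytes are
  # base64-encoded, stripped of '=', '+' and '/', then truncated to 32 bytes by dd.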
KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
NODE_PROBLEM_DETECTOR_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
HEAPSTER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
CLUSTER_AUTOSCALER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_DNS_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "Generated PKI authentication data for kubemark."
}
# Wait for the master to be reachable for executing commands on it. We do this by
# trying to run the bash noop(:) on the master, with 10 retries.
function wait-for-master-reachability {
execute-cmd-on-master-with-retries ":" 10
echo "Checked master reachability for remote command execution."
}
# Write all the relevant certs/keys/tokens to the master.
function write-pki-config-to-master {
PKI_SETUP_CMD="sudo mkdir /home/kubernetes/k8s_auth_data -p && \
sudo bash -c \"echo ${CA_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/ca.crt\" && \
sudo bash -c \"echo ${MASTER_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/server.cert\" && \
sudo bash -c \"echo ${MASTER_KEY_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/server.key\" && \
sudo bash -c \"echo ${REQUESTHEADER_CA_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/aggr_ca.crt\" && \
sudo bash -c \"echo ${PROXY_CLIENT_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/proxy_client.crt\" && \
sudo bash -c \"echo ${PROXY_CLIENT_KEY_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/proxy_client.key\" && \
sudo bash -c \"echo ${KUBECFG_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/kubecfg.crt\" && \
sudo bash -c \"echo ${KUBECFG_KEY_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/kubecfg.key\" && \
sudo bash -c \"echo \"${KUBE_BEARER_TOKEN},admin,admin\" > /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBELET_TOKEN},system:node:node-name,uid:kubelet,system:nodes\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBE_PROXY_TOKEN},system:kube-proxy,uid:kube_proxy\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo \"${HEAPSTER_TOKEN},system:heapster,uid:heapster\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo \"${CLUSTER_AUTOSCALER_TOKEN},system:cluster-autoscaler,uid:cluster-autoscaler\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo \"${NODE_PROBLEM_DETECTOR_TOKEN},system:node-problem-detector,uid:system:node-problem-detector\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBE_DNS_TOKEN},system:kube-dns,uid:kube-dns\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo ${KUBE_PASSWORD},admin,admin > /home/kubernetes/k8s_auth_data/basic_auth.csv\""
execute-cmd-on-master-with-retries "${PKI_SETUP_CMD}" 3
echo "Wrote PKI certs, keys, tokens and admin password to master."
}
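# Note (illustrative): each known_tokens.csv line written above follows the
# apiserver static token file format token,user,uid[,"group1,group2"]; for
# example, the kubelet entry additionally grants the system:nodes group.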
# Write kubeconfig to ${RESOURCE_DIRECTORY}/kubeconfig.kubemark in order to
# use kubectl locally.
function write-local-kubeconfig {
LOCAL_KUBECONFIG="${RESOURCE_DIRECTORY}/kubeconfig.kubemark"
cat > "${LOCAL_KUBECONFIG}" << EOF
apiVersion: v1
kind: Config
users:
- name: kubecfg
user:
client-certificate-data: "${KUBECFG_CERT_BASE64}"
client-key-data: "${KUBECFG_KEY_BASE64}"
username: admin
password: admin
clusters:
- name: kubemark
cluster:
certificate-authority-data: "${CA_CERT_BASE64}"
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: kubecfg
name: kubemark-context
current-context: kubemark-context
EOF
echo "Kubeconfig file for kubemark master written to ${LOCAL_KUBECONFIG}."
}
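# Example usage (illustrative): once this file exists, kubectl can reach the
# kubemark master directly, e.g.
#   "${KUBECTL}" --kubeconfig="${RESOURCE_DIRECTORY}/kubeconfig.kubemark" get nodes
# The wait-for-hollow-nodes-to-run-or-timeout step below relies on exactly this
# kubeconfig.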
# Copy all the necessary resource files (scripts/configs/manifests) to the master.
function copy-resource-files-to-master {
copy-files \
"${SERVER_BINARY_TAR}" \
"${RESOURCE_DIRECTORY}/kubemark-master-env.sh" \
"${RESOURCE_DIRECTORY}/start-kubemark-master.sh" \
"${RESOURCE_DIRECTORY}/kubeconfig.kubemark" \
"${KUBEMARK_DIRECTORY}/configure-kubectl.sh" \
"${RESOURCE_DIRECTORY}/manifests/etcd.yaml" \
"${RESOURCE_DIRECTORY}/manifests/etcd-events.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-apiserver.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-scheduler.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-controller-manager.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-addon-manager.yaml" \
"${RESOURCE_DIRECTORY}/manifests/addons/kubemark-rbac-bindings" \
"kubernetes@${MASTER_NAME}":/home/kubernetes/
echo "Copied server binary, master startup scripts, configs and resource manifests to master."
}
# Run start-kubemark-master.sh on the master to bring up the master components.
function start-master-components {
echo ""
MASTER_STARTUP_CMD="sudo bash /home/kubernetes/start-kubemark-master.sh"
execute-cmd-on-master-with-retries "${MASTER_STARTUP_CMD}"
echo "The master has started and is now live."
}
# Create a docker image for hollow-node and upload it to the appropriate docker registry.
function create-and-upload-hollow-node-image {
authenticate-docker
KUBEMARK_IMAGE_REGISTRY="${KUBEMARK_IMAGE_REGISTRY:-${CONTAINER_REGISTRY}/${PROJECT}}"
if [[ "${KUBEMARK_BAZEL_BUILD:-}" =~ ^[yY]$ ]]; then
# Build+push the image through bazel.
touch WORKSPACE # Needed for bazel.
build_cmd=("bazel" "run" "//cluster/images/kubemark:push" "--define" "REGISTRY=${KUBEMARK_IMAGE_REGISTRY}" "--define" "IMAGE_TAG=${KUBEMARK_IMAGE_TAG}")
run-cmd-with-retries "${build_cmd[@]}"
else
# Build+push the image through makefile.
build_cmd=("make" "${KUBEMARK_IMAGE_MAKE_TARGET}")
MAKE_DIR="${KUBE_ROOT}/cluster/images/kubemark"
KUBEMARK_BIN="$(kube::util::find-binary-for-platform kubemark linux/amd64)"
if [[ -z "${KUBEMARK_BIN}" ]]; then
echo 'Cannot find cmd/kubemark binary'
exit 1
fi
echo "Copying kubemark binary to ${MAKE_DIR}"
cp "${KUBEMARK_BIN}" "${MAKE_DIR}"
CURR_DIR=$(pwd)
cd "${MAKE_DIR}"
REGISTRY=${KUBEMARK_IMAGE_REGISTRY} IMAGE_TAG=${KUBEMARK_IMAGE_TAG} run-cmd-with-retries "${build_cmd[@]}"
rm kubemark
cd "${CURR_DIR}"
fi
echo "Created and uploaded the kubemark hollow-node image to docker registry."
}
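# Illustrative note: with the defaults above, the pushed image reference is
# expected to look like ${KUBEMARK_IMAGE_REGISTRY}/kubemark:${KUBEMARK_IMAGE_TAG},
# e.g. gcr.io/my-project/kubemark:x7k2q9 (project and tag are hypothetical);
# the hollow-node manifest below is templated with the same registry and tag.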
# Generate secret and configMap for the hollow-node pods to work, prepare
# manifests of the hollow-node and heapster replication controllers from
# templates, and finally create these resources through kubectl.
function create-kube-hollow-node-resources {
# Create kubeconfig for Kubelet.
KUBELET_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: kubelet
user:
client-certificate-data: "${KUBELET_CERT_BASE64}"
client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
cluster:
certificate-authority-data: "${CA_CERT_BASE64}"
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: kubelet
name: kubemark-context
current-context: kubemark-context")
# Create kubeconfig for Kubeproxy.
KUBEPROXY_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
clusters:
- name: kubemark
cluster:
insecure-skip-tls-verify: true
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: kube-proxy
name: kubemark-context
current-context: kubemark-context")
# Create kubeconfig for Heapster.
HEAPSTER_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: heapster
user:
token: ${HEAPSTER_TOKEN}
clusters:
- name: kubemark
cluster:
insecure-skip-tls-verify: true
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: heapster
name: kubemark-context
current-context: kubemark-context")
# Create kubeconfig for Cluster Autoscaler.
CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: cluster-autoscaler
user:
token: ${CLUSTER_AUTOSCALER_TOKEN}
clusters:
- name: kubemark
cluster:
insecure-skip-tls-verify: true
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: cluster-autoscaler
name: kubemark-context
current-context: kubemark-context")
# Create kubeconfig for NodeProblemDetector.
NPD_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: node-problem-detector
user:
token: ${NODE_PROBLEM_DETECTOR_TOKEN}
clusters:
- name: kubemark
cluster:
insecure-skip-tls-verify: true
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: node-problem-detector
name: kubemark-context
current-context: kubemark-context")
# Create kubeconfig for Kube DNS.
KUBE_DNS_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: kube-dns
user:
token: ${KUBE_DNS_TOKEN}
clusters:
- name: kubemark
cluster:
insecure-skip-tls-verify: true
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: kube-dns
name: kubemark-context
current-context: kubemark-context")
# Create kubemark namespace.
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/kubemark-ns.json"
# Create configmap for configuring the hollow-kubelet, hollow-proxy and npd.
"${KUBECTL}" create configmap "node-configmap" --namespace="kubemark" \
--from-literal=content.type="${TEST_CLUSTER_API_CONTENT_TYPE}" \
--from-file=kernel.monitor="${RESOURCE_DIRECTORY}/kernel-monitor.json"
# Create secret for passing kubeconfigs to kubelet, kubeproxy and npd.
"${KUBECTL}" create secret generic "kubeconfig" --type=Opaque --namespace="kubemark" \
--from-literal=kubelet.kubeconfig="${KUBELET_KUBECONFIG_CONTENTS}" \
--from-literal=kubeproxy.kubeconfig="${KUBEPROXY_KUBECONFIG_CONTENTS}" \
--from-literal=heapster.kubeconfig="${HEAPSTER_KUBECONFIG_CONTENTS}" \
--from-literal=cluster_autoscaler.kubeconfig="${CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS}" \
--from-literal=npd.kubeconfig="${NPD_KUBECONFIG_CONTENTS}" \
--from-literal=dns.kubeconfig="${KUBE_DNS_KUBECONFIG_CONTENTS}"
# Create addon pods.
# Heapster.
mkdir -p "${RESOURCE_DIRECTORY}/addons"
sed "s/{{MASTER_IP}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/heapster_template.json" > "${RESOURCE_DIRECTORY}/addons/heapster.json"
metrics_mem_per_node=4
metrics_mem=$((200 + ${metrics_mem_per_node}*${NUM_NODES}))
sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
metrics_cpu_per_node_numerator=${NUM_NODES}
metrics_cpu_per_node_denominator=2
metrics_cpu=$((80 + metrics_cpu_per_node_numerator / metrics_cpu_per_node_denominator))
sed -i'' -e "s/{{METRICS_CPU}}/${metrics_cpu}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
eventer_mem_per_node=500
eventer_mem=$((200 * 1024 + ${eventer_mem_per_node}*${NUM_NODES}))
sed -i'' -e "s/{{EVENTER_MEM}}/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
# Cluster Autoscaler.
if [[ "${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
echo "Setting up Cluster Autoscaler"
KUBEMARK_AUTOSCALER_MIG_NAME="${KUBEMARK_AUTOSCALER_MIG_NAME:-${NODE_INSTANCE_PREFIX}-group}"
KUBEMARK_AUTOSCALER_MIN_NODES="${KUBEMARK_AUTOSCALER_MIN_NODES:-0}"
KUBEMARK_AUTOSCALER_MAX_NODES="${KUBEMARK_AUTOSCALER_MAX_NODES:-10}"
NUM_NODES=${KUBEMARK_AUTOSCALER_MAX_NODES}
echo "Setting maximum cluster size to ${NUM_NODES}."
KUBEMARK_MIG_CONFIG="autoscaling.k8s.io/nodegroup: ${KUBEMARK_AUTOSCALER_MIG_NAME}"
sed "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/cluster-autoscaler_template.json" > "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
sed -i'' -e "s/{{kubemark_autoscaler_mig_name}}/${KUBEMARK_AUTOSCALER_MIG_NAME}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
sed -i'' -e "s/{{kubemark_autoscaler_min_nodes}}/${KUBEMARK_AUTOSCALER_MIN_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
sed -i'' -e "s/{{kubemark_autoscaler_max_nodes}}/${KUBEMARK_AUTOSCALER_MAX_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
fi
# Kube DNS.
if [[ "${ENABLE_KUBEMARK_KUBE_DNS:-}" == "true" ]]; then
echo "Setting up kube-dns"
sed "s/{{dns_domain}}/${KUBE_DNS_DOMAIN}/g" "${RESOURCE_DIRECTORY}/kube_dns_template.yaml" > "${RESOURCE_DIRECTORY}/addons/kube_dns.yaml"
fi
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/addons" --namespace="kubemark"
# Create the replication controller for hollow-nodes.
# NUM_REPLICAS may be overridden, e.g. when running the Cluster Autoscaler.
NUM_REPLICAS=${NUM_REPLICAS:-${NUM_NODES}}
sed "s/{{numreplicas}}/${NUM_REPLICAS}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.yaml" > "${RESOURCE_DIRECTORY}/hollow-node.yaml"
proxy_cpu=20
if [ "${NUM_NODES}" -gt 1000 ]; then
proxy_cpu=50
fi
proxy_mem_per_node=50
proxy_mem=$((100 * 1024 + ${proxy_mem_per_node}*${NUM_NODES}))
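# Worked example (illustrative): with NUM_NODES=100, proxy_cpu stays at 20 and
# proxy_mem = 100*1024 + 50*100 = 107400; the hollow-node template decides the
# units attached to these numbers.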
sed -i'' -e "s/{{HOLLOW_PROXY_CPU}}/${proxy_cpu}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s/{{HOLLOW_PROXY_MEM}}/${proxy_mem}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s'{{kubemark_image_registry}}'${KUBEMARK_IMAGE_REGISTRY}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s/{{kubemark_image_tag}}/${KUBEMARK_IMAGE_TAG}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s/{{hollow_kubelet_params}}/${HOLLOW_KUBELET_TEST_ARGS}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s/{{hollow_proxy_params}}/${HOLLOW_PROXY_TEST_ARGS}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s'{{kubemark_mig_config}}'${KUBEMARK_MIG_CONFIG:-}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/hollow-node.yaml" --namespace="kubemark"
echo "Created secrets, configMaps, replication-controllers required for hollow-nodes."
}
# Wait until all hollow-nodes are running or there is a timeout.
function wait-for-hollow-nodes-to-run-or-timeout {
echo -n "Waiting for all hollow-nodes to become Running"
start=$(date +%s)
nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
until [[ "${ready}" -ge "${NUM_REPLICAS}" ]]; do
echo -n "."
sleep 1
now=$(date +%s)
# Fail it if it already took more than 30 minutes.
if [ $((now - start)) -gt 1800 ]; then
echo ""
echo -e "${color_red} Timeout waiting for all hollow-nodes to become Running. ${color_norm}"
# Try listing nodes again - if it fails it means that API server is not responding
if "${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node &> /dev/null; then
echo "Found only ${ready} ready hollow-nodes while waiting for ${NUM_NODES}."
else
echo "Got error while trying to list hollow-nodes. Probably API server is down."
fi
pods=$("${KUBECTL}" get pods -l name=hollow-node --namespace=kubemark) || true
running=$(($(echo "${pods}" | grep "Running" | wc -l)))
echo "${running} hollow-nodes are reported as 'Running'"
not_running=$(($(echo "${pods}" | grep -v "Running" | wc -l) - 1))
echo "${not_running} hollow-nodes are reported as NOT 'Running'"
echo "${pods}" | grep -v "Running"
exit 1
fi
nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
done
echo -e "${color_green} Done!${color_norm}"
}
############################### Main Function ########################################
detect-project &> /dev/null
# Setup for master.
echo -e "${color_yellow}STARTING SETUP FOR MASTER${color_norm}"
find-release-tars
create-master-environment-file
create-master-instance-with-resources
generate-pki-config
wait-for-master-reachability
write-pki-config-to-master
write-local-kubeconfig
copy-resource-files-to-master
start-master-components
# Setup for hollow-nodes.
echo ""
echo -e "${color_yellow}STARTING SETUP FOR HOLLOW-NODES${color_norm}"
create-and-upload-hollow-node-image
create-kube-hollow-node-resources
wait-for-hollow-nodes-to-run-or-timeout
echo ""
echo "Master IP: ${MASTER_IP}"
echo "Password to kubemark master: ${KUBE_PASSWORD}"
echo "Kubeconfig for kubemark master is written in ${LOCAL_KUBECONFIG}"
|
huzhengchuan/kubernetes
|
test/kubemark/start-kubemark.sh
|
Shell
|
apache-2.0
| 20,642 |
#!/bin/sh
. ../../dttools/test/test_runner_common.sh
test_dir=`basename $0 .sh`.dir
test_output=`basename $0 .sh`.output
prepare()
{
mkdir $test_dir
cd $test_dir
ln -sf ../syntax/variable_scope.makeflow Makeflow
cat > ../$test_output <<EOF
0
1
1 2
0
1
EOF
exit 0
}
run()
{
cd $test_dir
../../src/makeflow -d all;
if [ $? -eq 0 ]; then
exec diff ../$test_output out.all
else
exit 1
fi
}
clean()
{
rm -fr $test_dir $test_output
exit 0
}
dispatch "$@"
# vim: set noexpandtab tabstop=4:
|
nhazekam/cctools
|
makeflow/test/TR_makeflow_020_syntax_variable_scope.sh
|
Shell
|
gpl-2.0
| 505 |
#!/usr/bin/env bash
DIR=$(git rev-parse --show-toplevel)
REV=$(git log -n1 --pretty=%H)
pushd ${DIR} > /dev/null || exit
code=0
modules=(core gui analysis server)
for module in "${modules[@]}"; do
cp python/${module}/${module}_auto.sip python/${module}/${module}_auto.sip.$REV.bak
done
./scripts/sip_include.sh
for module in "${modules[@]}"; do
outdiff=$(diff python/${module}/${module}_auto.sip python/${module}/${module}_auto.sip.$REV.bak)
if [[ -n $outdiff ]]; then
echo -e " *** SIP include file for \x1B[33m${module}\x1B[0m not up to date."
echo "$outdiff"
code=1
mv python/${module}/${module}_auto.sip.$REV.bak python/${module}/${module}_auto.sip
else
rm python/${module}/${module}_auto.sip.$REV.bak
fi
done
if [[ ${code} -eq 1 ]]; then
echo -e " Run \x1B[33m./scripts/sip_include.sh\x1B[0m to fix this."
echo -e " If a header should not have a sip file created, add \x1B[33m#define SIP_NO_FILE\x1B[0m."
fi
popd > /dev/null || exit
exit $code
|
mhugo/QGIS
|
tests/code_layout/test_sip_include.sh
|
Shell
|
gpl-2.0
| 995 |
#!/bin/bash
#
# Copyright 2007 Luis R. Rodriguez <[email protected]>
#
# Use this to parse a small .config-like file and generate
# our own autoconf.h, which has a define for each config option
# just like the kernel's include/linux/autoconf.h.
#
# XXX: consider using scripts/kconfig/confdata.c instead.
# On the downside this would require the user to have libc though.
# This indicates which is the oldest kernel we support
# Update this if you are adding support for older kernels.
OLDEST_KERNEL_SUPPORTED="2.6.24"
COMPAT_RELEASE="compat_version"
KERNEL_RELEASE="compat_base_tree_version"
MULT_DEP_FILE=".compat_pivot_dep"
if [ $# -ne 1 ]; then
echo "Usage $0 config-file"
exit
fi
COMPAT_CONFIG="$1"
if [ ! -f $COMPAT_CONFIG ]; then
echo "File $1 is not a file"
exit
fi
if [ ! -f $COMPAT_RELEASE -o ! -f $KERNEL_RELEASE ]; then
echo "Error: $COMPAT_RELEASE or $KERNEL_RELEASE file is missing"
exit
fi
CREL=$(cat $COMPAT_RELEASE | tail -1)
KREL=$(cat $KERNEL_RELEASE | tail -1)
DATE=$(date)
# Defines a CONFIG_ option if not defined yet, this helps respect
# linux/autoconf.h
function define_config {
VAR=$1
VALUE=$2
case $VALUE in
n) # Try to undefine it
echo "#undef $VAR"
;;
y)
echo "#ifndef $VAR"
echo "#define $VAR 1"
echo "#endif /* $VAR */ "
;;
m)
echo "#ifndef $VAR"
echo "#define $VAR 1"
echo "#endif /* $VAR */ "
;;
*) # Assume string
# XXX: add better checks to make sure what was on
# the right was indeed a string
echo "#ifndef $VAR"
echo "#define $VAR \"$VALUE\""
echo "#endif /* $VAR */ "
;;
esac
}
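# Illustrative examples (CONFIG_FOO/CONFIG_BAR/CONFIG_BAZ are hypothetical option names):
#   define_config CONFIG_FOO y     emits   #ifndef CONFIG_FOO / #define CONFIG_FOO 1 / #endif
#   define_config CONFIG_BAR n     emits   #undef CONFIG_BAR
#   define_config CONFIG_BAZ foo   emits   #define CONFIG_BAZ "foo" (guarded by #ifndef)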
# This deals with core compat-wireless kernel requirements.
function define_config_req {
VAR=$1
echo "#ifndef $VAR"
echo -n "#error Compat-wireless requirement: $VAR must be enabled "
echo "in your kernel"
echo "#endif /* $VAR */"
}
# This handles modules which depend on kernel functionality that
# compat-wireless does not provide yet, either because the dependency
# is not available as a kernel module or because compat-wireless simply
# does not ship that module.
function define_config_dep {
VAR=$1
VALUE=$2
DEP=$3
WARN_VAR="COMPAT_WARN_$VAR"
echo "#ifdef $DEP"
define_config $VAR $VALUE
echo "#else"
# XXX: figure out a way to warn only once
# define only once in case user tried to enable config option
# twice in config.mk
echo "#ifndef $WARN_VAR"
# Lets skip these for now.. they might be too annoying
#echo "#warning Skipping $VAR as $DEP was needed... "
#echo "#warning This just means $VAR won't be built and is not fatal."
echo "#define $WARN_VAR"
echo "#endif /* $VAR */"
echo "#endif /* $WARN_VAR */"
}
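# Illustrative example (hypothetical names): define_config_dep CONFIG_FOO m CONFIG_BAR
# wraps the usual define of CONFIG_FOO in "#ifdef CONFIG_BAR ... #else ... #endif",
# so CONFIG_FOO only gets defined when the kernel provides CONFIG_BAR.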
# This handles options which have *multiple* dependencies from the kernel
function define_config_multiple_deps {
VAR=$1
VALUE=$2
DEP_ARRAY=$3
# First, put all ifdefs
for i in $(cat $MULT_DEP_FILE); do
echo "#ifdef $i"
done
# Now put our option in the middle
define_config $VAR $VALUE
# Now close all the ifdefs opened above
for i in $(cat $MULT_DEP_FILE); do
echo "#endif"
done
}
function kernel_version_req {
VERSION=$(echo $1 | sed -e 's/\./,/g')
echo "#if (LINUX_VERSION_CODE < KERNEL_VERSION($VERSION))"
echo "#error Compat-wireless requirement: Linux >= $VERSION"
echo "#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION($VERSION) */ "
}
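# Illustrative example: kernel_version_req 2.6.24 emits
#   #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
#   #error Compat-wireless requirement: Linux >= 2,6,24
#   #endif ...
# (the dots also become commas in the #error line, because $1 is rewritten
# before both echo statements).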
cat <<EOF
#ifndef COMPAT_AUTOCONF_INCLUDED
#define COMPAT_AUTOCONF_INCLUDED
/*
* Automatically generated C config: don't edit
* $DATE
* compat-wireless-2.6: $CREL
* linux-2.6: $KREL
*/
#define COMPAT_RELEASE "$CREL"
#define COMPAT_KERNEL_RELEASE "$KREL"
EOF
# Checks user is compiling against a kernel we support
kernel_version_req $OLDEST_KERNEL_SUPPORTED
# For each CONFIG_FOO=x option
for i in $(egrep '^CONFIG_|^ifdef CONFIG_|^ifndef CONFIG_|^endif #CONFIG_|^else #CONFIG_' $COMPAT_CONFIG | sed 's/ /+/'); do
case $i in
'ifdef+CONFIG_'* )
echo "#$i" | sed -e 's/+/ /' -e 's/\(ifdef CONFIG_COMPAT_KERNEL_\)\([0-9]*\)/if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,\2))/' -e 's/\(#ifdef \)\(CONFIG_[^:space:]*\)/#if defined(\2) || defined(\2_MODULE)/'
continue
;;
'ifndef+CONFIG_'* )
echo "#$i" | sed -e 's/+/ /' -e 's/\(ifndef CONFIG_COMPAT_KERNEL_\)\([0-9]*\)/if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,\2))/' -e 's/\(#ifndef \)\(CONFIG_[^:space:]*\)/#if !defined(\2) && !defined(\2_MODULE)/'
continue
;;
'else+#CONFIG_'* | 'endif+#CONFIG_'* )
echo "#$i */" |sed -e 's/+#/ \/* /g'
continue
;;
CONFIG_* )
# Get the element on the left of the "="
VAR=$(echo $i | cut -d"=" -f 1)
# Get the element on the right of the "="
VALUE=$(echo $i | cut -d"=" -f 2)
# Handle core kernel module dependencies here.
case $VAR in
# Ignore this; we have a special handler for it at the bottom
# instead. We still need to keep it in config.mk to let Makefiles
# know it's enabled, so just skip it here.
CONFIG_MAC80211_QOS)
continue
;;
esac
# Any other module which can *definitely* be built as a module goes here
define_config $VAR $VALUE
continue
;;
esac
done
# Deal with special cases. CONFIG_MAC80211_QOS is such a case.
# We handle this specially for different kernels we support.
if [ -f $KLIB_BUILD/Makefile ]; then
SUBLEVEL=$(make -C $KLIB_BUILD kernelversion | sed -n 's/^2\.6\.\([0-9]\+\).*/\1/p')
if [ $SUBLEVEL -le 22 ]; then
define_config CONFIG_MAC80211_QOS y
else # kernel >= 2.6.23
# CONFIG_MAC80211_QOS on these kernels requires
# CONFIG_NET_SCHED and CONFIG_NETDEVICES_MULTIQUEUE
rm -f $MULT_DEP_FILE
echo CONFIG_NET_SCHED >> $MULT_DEP_FILE
echo CONFIG_NETDEVICES_MULTIQUEUE >> $MULT_DEP_FILE
define_config_multiple_deps CONFIG_MAC80211_QOS y $ALL_DEPS
rm -f $MULT_DEP_FILE
fi
fi
echo "#endif /* COMPAT_AUTOCONF_INCLUDED */"
|
kbc-developers/android_kernel_semc_xperia2011
|
ti_wlan/compat-wireless-wl12xx/scripts/gen-compat-autoconf.sh
|
Shell
|
gpl-2.0
| 5,728 |