code (string, 2 – 1.05M) | repo_name (string, 5 – 110) | path (string, 3 – 922) | language (string, 1 class) | license (string, 15 values) | size (int64, 2 – 1.05M)
---|---|---|---|---|---|
#!/usr/bin/env bash
go get github.com/mitchellh/gox # && goxc -t > /dev/null 2>&1
|
alixaxel/halBox
|
packages/ubuntu/trusty/go/gox.sh
|
Shell
|
mit
| 83 |
#!/bin/bash
#
# Set up a super simple web server and make it accept GET and POST requests
# for Sensu plugin testing.
#
set -e
# base utilities that need to exist to start bootstrapping
apt-get update
apt-get install -y build-essential
# setup the rubies
source /etc/profile
DATA_DIR=/tmp/kitchen/data
RUBY_HOME=${MY_RUBY_HOME}
# Start bootstrapping
## install some required deps so the pg gem can build
# End of actual bootstrap
# Install gems
cd $DATA_DIR
SIGN_GEM=false gem build sensu-plugins-mesos.gemspec
gem install sensu-plugins-mesos-*.gem
|
sensu-plugins/sensu-plugins-mesos
|
test/fixtures/bootstrap.sh
|
Shell
|
mit
| 553 |
#! /bin/bash -x
# Emulator archive script. This will handle creating a folder which contains all
# the various pieces needed to run the emulator. This script can only be run
# after a full successful build has completed for QEMU.
dest=$1
mkdir -p $dest/{bin,android-emulator,system,prebuilts/qemu-kernel/arm}
# emulator specific helpers ...
cp -ra prebuilts/android-emulator $dest
rm -rf $dest/android-emulator/windows # Sry...
# emulator kernel
# TODO: Support other emulator kernels ? (x86, arm64, etc... ?)
cp \
$ANDROID_BUILD_TOP/prebuilts/qemu-kernel/arm/kernel-qemu-armv7 \
$dest/prebuilts/qemu-kernel/arm/
cp $ANDROID_PRODUCT_OUT/*.img $dest/
cp $ANDROID_PRODUCT_OUT/system/build.prop $dest/system
# Add some preamble which sets some common android build system environment
# variables. These variables are used by the emulator command to find where
# various libraries are which allows us to share run-emulator script.
cat > $dest/bin/run-emulator <<'EOF'
#! /bin/bash
# Resolve to realpath....
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
ROOT="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"
export ANDROID_PRODUCT_OUT=$ROOT
export ANDROID_BUILD_TOP=$ROOT
case $(uname -s) in
Darwin)
PATH=$ROOT/android-emulator/darwin-x86_64:$PATH
;;
Linux)
PATH=$ROOT/android-emulator/linux-x86_64:$PATH
;;
*)
echo Unsupported platform: $(uname -s)
exit 1
;;
esac
EOF
cat $ANDROID_BUILD_TOP/run-emulator >> $dest/bin/run-emulator
chmod u+x $dest/bin/run-emulator
|
silklabs/silk
|
bsp-gonk/board/qemu/archive.sh
|
Shell
|
mit
| 1,626 |
#! /bin/bash
mv ~/Library/Developer/Xcode/UserData/CodeSnippets ~/Library/Developer/Xcode/UserData/CodeSnippets.backup
mv ~/Library/Developer/Xcode/UserData/FontAndColorThemes ~/Library/Developer/Xcode/UserData/FontAndColorThemes.backup
mv ~/Library/Application\ Support/Developer/Shared/Xcode/Plug-ins ~/Library/Application\ Support/Developer/Shared/Xcode/Plug-ins.backup
SRC_HOME=`pwd`
ln -s ${SRC_HOME}/CodeSnippets ~/Library/Developer/Xcode/UserData/CodeSnippets
ln -s ${SRC_HOME}/FontAndColorThemes ~/Library/Developer/Xcode/UserData/FontAndColorThemes
ln -s ${SRC_HOME}/Plug-ins ~/Library/Application\ Support/Developer/Shared/Xcode/Plug-ins
#echo "install newversion vim"
#brew install vim
#alias vim='/usr/local/Cellar/vim/7.4.488/bin'
#echo "install vundle (vim plugin manager)"
#mkdir .vim
#touch .vimrc (copy from git)
#git clone https://github.com/woshizilong/Vundle.vim.git ~/.vim/bundle/Vundle.vim
#echo "install YouCompleteMe"
#brew install cmake
#cd ~/.vim/bundle/YouCompleteMe
#./install.sh --clang-completer
#Workaround for plugins that stop working after upgrading to Xcode 6.3
#1. Open Terminal and run the following command (the last item is the DVTPlugInCompatibilityUUID that gets read out)
#find ~/Library/Application\ Support/Developer/Shared/Xcode/Plug-ins -name Info.plist -maxdepth 3 | xargs -I{} defaults write {} DVTPlugInCompatibilityUUIDs -array-add `defaults read /Applications/Xcode.app/Contents/Info.plist DVTPlugInCompatibilityUUID`
#2. If that does not work, move all plugins out of the Plug-ins directory, restart Xcode and quit it, then move the plugins back into Plug-ins, restart Xcode, and choose Load in the prompt.
echo "done"
|
woshizilong/Xcode-Tools
|
setup_Xcode-Tools.sh
|
Shell
|
mit
| 1,598 |
#!/bin/sh
# Check for root permission
if [ "$(id -u)" -ne 0 ] ; then
printf "Sorry! Please run this script with administrator privileges, e.g. type \"sudo 03_make_device_app.sh\".\n"
exit 1
fi
printf "This is the final setup step. We will now create a virtual simulation app (wind speed / temperature / humidity / dust concentration).\n"
printf "Enter the Key and the host name you noted when you first created the virtual device.\n"
read -r -p "Enter the host name. (IoT Hub -> Overview -> Host name -> hostname.azure-devices.net / do not copy the .azure-devices.net part!) : " USER_Hostname
echo $USER_Hostname
read -r -p "Enter the device Key you copied into your notepad. : " USER_VirtualDeviceKey
echo $USER_VirtualDeviceKey
sed -i "s~{youriothubname}~$USER_Hostname~g" ./simulated-device/src/main/java/com/mycompany/app/App.java
sed -i "s~{yourdevicekey}~$USER_VirtualDeviceKey~g" ./simulated-device/src/main/java/com/mycompany/app/App.java
mvn clean package -DskipTests -f ./simulated-device
printf "잘했습니다! 시뮬레이션 앱을 만들었습니다.\n"
printf "지금까지의 모든 창과 인터넷 창은 종료하여 주십시오.\n"
sleep 10s
|
janghe11/IoT_Hands-On-Lab
|
03_iot-simulator/03_make_device_app.sh
|
Shell
|
mit
| 1,278 |
if [ -d ./bin ]; then
echo "Cleaning bin directory..."
rm -rf ./bin/*
fi
echo "Installing dependencies..."
govendor sync
echo "Compiling assets..."
go-bindata -prefix assets/ -pkg main -o asset.go assets/
for GOOS in darwin windows linux; do
for GOARCH in 386 amd64; do
echo "Building $GOOS/$GOARCH..."
FILENAME="pokevision"
if [ "windows" == $GOOS ]; then
FILENAME="pokevision.exe"
fi
GOOS=$GOOS GOARCH=$GOARCH go build -o ./bin/$GOOS/$GOARCH/$FILENAME ./
if [ "windows" == $GOOS ]; then
zip -rjX ./bin/$GOOS-$GOARCH.zip ./bin/$GOOS/$GOARCH/
else
tar -C ./bin/$GOOS/$GOARCH/ -cvzf ./bin/$GOOS-$GOARCH.tar.gz .
fi
done
done
echo "Building linux/arm..."
GOOS=linux GOARCH=arm go build -o ./bin/linux/arm/pokevision ./
tar -C ./bin/linux/arm/ -cvzf ./bin/linux-arm.tar.gz .
|
jacobmarshall/pokevision-cli
|
build.sh
|
Shell
|
mit
| 844 |
## Install Steam
wget -P/tmp http://repo.steampowered.com/steam/archive/precise/steam_latest.deb
sudo gdebi /tmp/steam_latest.deb
rm /tmp/steam_latest.deb
|
vakimov/dotfiles
|
linux_mint/bootstrap.steam.sh
|
Shell
|
mit
| 157 |
#!/bin/bash
#
# Copyright (c) 2016 Intel Corporation. All rights reserved.
# Use of this source code is governed by a MIT-style license that can be
# found in the LICENSE file.
#
#set -x
# Run script as root, unless permissions are dropped elsewhere.
echo "Please input the test duration (seconds): "
read num
END=`expr $num / 10`
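# Samples are taken every 10 seconds, so END is the number of iterations
# needed to cover the requested duration.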
cpu_log=cpu_used.dat
mem_log=mem_info.dat
gpu_log=gpu.dat
echo "" > gpu.dat
echo "#time cpu_used" > cpu_used.dat
echo "#time total free used buff/cache" > mem_info.dat
intel_gpu_top -o $gpu_log 2>&1 > /dev/null &
sleep 10
for ((i=1;i<=END;i++))
do
top_info=`top -bn1 |grep -E "KiB Mem|$1"| paste -s -d "!"`
cpu_used=`echo $top_info|awk -F '!' '{print $2}'|awk '{print $9}'|awk -F '.' '{print $1}'`
mem_info=`echo $top_info|awk -F ' ' '{print $4,$6,$8,$10,$8/$4*100}'`
n=`expr $i \* 10`
echo $n $cpu_used >> $cpu_log
echo $n $mem_info >> $mem_log
remain=`expr $num - $i \* 10 + 10`
h=`expr $remain / 3600`
m=`expr $remain % 3600 / 60`
s=`expr $remain % 60`
echo "test remaining $h hour(s) $m minute(s) $s second(s)"
sleep 10
done
gpu_pids=`ps aux | grep intel_gpu_top|grep -v grep| awk '{print $2}'`
for pid in $gpu_pids;do
kill -9 $pid > /dev/null 2>&1
done
echo "Done. result kept at $cpu_log, $mem_log and $gpu_log"
kill -9 $$
|
lisa0314/node-realsense-1
|
src/person-tracking/test/StableTest/getUsageRecord.sh
|
Shell
|
mit
| 1,294 |
#!/bin/sh
cc=$1
file=$2
$cc -c $file -o ${file}.o || exit 1
readelf -s ${file}.o | awk '
BEGIN { missing=2; }
$8 ~ /(foo|puts)/ { missing -= 1; }
END { exit missing }';
exit $?
|
larmel/c-compiler
|
test/c99/inline-extern.c.sh
|
Shell
|
mit
| 183 |
#!/bin/bash
OPTIND=1
TESTED_CLIENT=0
TESTED_SERVER=0
TESTS_FAILED=0
SHOULD_REPORT=0
# make pipelines return failure if any command in them fails
set -o pipefail
# print each command to the terminal as it executes
set -x
read -d '' HELP_STRING <<'EOF'
"This script is a testing and coverage reporting utility"
Usage:
./run_tests.sh <Options>
"Options:"
"-h: Display this help menu"
"-c: Run client tests"
"-s: Run server tests"
"-r: Submit coverage report to a coverage analysis tool"
example:
./run_tests.sh -h
./run_tests.sh -c
./run_tests.sh -s
./run_tests.sh -r
EOF
set -e
function export_env() {
export SECRET_KEY=django-react-library-app
export DB_USER=postgres
export DB_PASS=postgres
export DB_SERVICE=postgres
export DB_PORT=5432
export DB_NAME=postgres
export CLIENT_ID=somecrazy
}
while getopts "hcsrR:" opt; do
case "$opt" in
h)
echo "$HELP_STRING"
exit 0
;;
c)
TESTED_CLIENT=1
cd ../client
# guard the call so "set -e" doesn't abort before we can record the failure
if ! npm test; then
TESTS_FAILED=1
fi
;;
s)
TESTED_SERVER=1
cd ..
export_env
# guard the call so "set -e" doesn't abort before we can record the failure
if ! coverage run --source libraryapp,libraryapi manage.py test; then
TESTS_FAILED=1
fi
;;
r) SHOULD_REPORT=1
;;
esac
done
shift $((OPTIND-1))
[ "$1" = "--" ] && shift
if [ $TESTED_CLIENT -eq 0 -a $TESTED_SERVER -eq 0 ]; then
echo "$HELP_STRING"
exit 1
fi
if [ $SHOULD_REPORT -eq 1 ]; then
if [ $TESTED_CLIENT -eq 1 ]; then
cd client
npm run report
cd ..
fi
if [ $TESTED_SERVER -eq 1 ]; then
echo -e "\nPlease configure a coverage analysis solution for the server"
fi
fi
if [ $TESTS_FAILED -eq 1 ]; then
# https://github.com/ryanoasis/public-bash-scripts/blob/master/unix-color-codes.sh
echo -e "\n\n\033[31mSome tests failed.\033[0m\n"; exit 1
else
echo -e "\n\n\033[32mAll tests passed!\033[0m\n"; exit 0
fi
|
andela-sjames/Django-ReactJS-Library-App
|
reactlibapp/scripts/run_tests.sh
|
Shell
|
mit
| 1,920 |
#!/bin/bash
# Env JSON File
_envjson="env.json"
# Helper Functions
script_location() {
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
SOURCE="$DIR/$SOURCE"
done
echo $( cd -P "$( dirname "$SOURCE" )" && pwd )
}
get_port() {
if [[ -f $_envjson ]]; then
echo $(cat $_envjson | grep $1 | grep -o "[0-9]\+")
fi
}
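# Note: grepping a JSON file for the first run of digits is brittle (any
# number on a matching line would be picked up). If jq is available, a sketch
# of a more robust lookup (assuming env.json maps names to port numbers):
#   get_port() { jq -r ".$1 // empty" "$_envjson"; }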
console_log() {
printf "Starting $1"
[[ -n $2 ]] && printf " on port $2"
printf "...\n"
}
# Enter project parent dir
dir=$(dirname $(script_location))
cd $dir
# Exit if node_modules not found i.e. pre-{npm install}
if [[ ! -d node_modules ]]; then
echo "Please run [npm install] or [./bin/setup.sh] first"
exit 1
fi
# Get Port Numbers
_eport=$(get_port EXPRESS_PORT)
_lport=$(get_port LIVERELOAD_PORT)
# Start Express
console_log "Express server" $_eport
(node server.js &) >/dev/null
# Start Gulp
console_log "Gulp with LiveReload" $_lport
(./node_modules/gulp/bin/gulp.js watch &) >/dev/null
# NOTE: Possibly output to logfiles
|
geekjuice/headstart
|
bin/start.sh
|
Shell
|
mit
| 1,092 |
#!/bin/sh
docker pull coderdojo/boot2zen
|
niccokunzmann/cp-docker-development
|
docker/vm/build/01_pull_boot2zen.sh
|
Shell
|
mit
| 42 |
#as -o swtch.o swtch.s
#cc -c -o swtch.o swtch.s
cc -g -c -o swtch.o swtch2.S
cc -g -c -o thread.o thread.c
cc -g -c -o mem.o mem.c
cc -g -c -o fmt.o fmt.c
cc -g -c -o assert.o assert.c
cc -g -c -o except.o except.c
cc -g -c -o chan.o chan.c
ar ruv libcii.a mem.o fmt.o assert.o except.o thread.o swtch.o chan.o
cc -g -c -o sieve.o sieve.c
cc -g -o sieve sieve.o libcii.a
|
wonghoifung/reviewing-c
|
cii/build_sieve.sh
|
Shell
|
mit
| 374 |
#!/bin/sh
curl -O https://closure-library.googlecode.com/git/closure/goog/base.js
echo "Moving base google clousre to the build directory"
mv base.js build/
|
Schuck-Johnson/csp-channel
|
scripts/get_google_closure_base.sh
|
Shell
|
mit
| 157 |
#!/bin/bash
set -eo pipefail -o nounset
## Get .genome file
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome
## Get GTF file
wget --quiet ftp://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_34/GRCh37_mapping/gencode.v34lift37.long_noncoding_RNAs.gtf.gz
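## Rebuild the GTF with a usable header: keep the original "##" comment lines,
## inject a named column-header line, append the records, coordinate-sort
## against the .genome file, and bgzip so the result can be tabix-indexed.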
cat <(gzip -dc gencode.v34lift37.long_noncoding_RNAs.gtf.gz | grep "^#") <(echo -e "#chrom\tsource\tfeature\tstart\tend\tscore\tstrand\tframe\tattribute") <(gzip -dc gencode.v34lift37.long_noncoding_RNAs.gtf.gz | grep -v "^#") \
| gsort /dev/stdin $genome \
| bgzip -c > hg19-long_noncoding_RNAs-gencode-v1.gtf.gz
tabix hg19-long_noncoding_RNAs-gencode-v1.gtf.gz
rm gencode.v34lift37.long_noncoding_RNAs.gtf.gz
|
gogetdata/ggd-recipes
|
recipes/genomics/Homo_sapiens/hg19/hg19-lncrna-annotations-chr-regions-gencode-v1/recipe.sh
|
Shell
|
mit
| 747 |
#!/usr/bin/env bash
# abort if we get any error
set -e
_tag=$1
_branch="$(git rev-parse --abbrev-ref HEAD)"
if [ "${_tag}" == "" ]; then
echo "Missing version param. ex './scripts/tag_release.sh v5.1.1'"
exit 1
fi
if [ "${_branch}" == "master" ]; then
echo "you cannot tag releases from the master branch"
echo "please checkout the release branch"
echo "ex 'git checkout v5.1.x'"
exit 1
fi
# always make sure to pull latest changes from origin
echo "pulling latest changes from ${_branch}"
git pull origin "${_branch}"
# create signed tag for latest commit
git tag -s "${_tag}" -m "release ${_tag}"
# verify the signed tag
git tag -v "${_tag}"
echo "Make sure the tag is signed as expected"
echo "press [y] to push the tags"
read -n 1 confirm
if [ "${confirm}" == "y" ]; then
git push origin "${_tag}"
else
git tag -d "${_tag}"
echo "Abort! "
fi
|
GridProtectionAlliance/openHistorian
|
Source/Applications/openHistorian/openHistorian/Grafana/scripts/tag_release.sh
|
Shell
|
mit
| 892 |
#!/bin/bash
set -x
# Script to configure the RTC and localtime
# Questions are asked interactively through a TUI
# Usage: /script/clock.sh <cmd>
# <cmd>: --first first installation
# --daily called from /etc/cron.daily
# (blank) only changes localtime
# The RTC must stay set and synchronized via NTP
# NTP needs to use server pools from several continents;
# the system automatically selects the closest ones.
# Several servers per group, from different origins, provide redundancy.
# localtime can be configured to make CRON easier for the
# administrator to use in his local time
# Clock configuration on CentOS 6
# http://dev.chetankjain.net/2012/04/fixing-date-time-and-zone-on-rhel-6.html?m=1
# http://thornelabs.net/2013/04/25/rhel-6-manually-change-time-zone.html
# https://www.centos.org/docs/5/html/5.1/Deployment_Guide/s2-sysconfig-clock.html
#=======================================================================
# Parse the command line
CMD=$1
# Helper functions
. /script/functions.sh
# Read previously saved data, if any
. /script/info/distro.var
VAR_FILE="/script/info/clock.var"
[ -e $VAR_FILE ] && . $VAR_FILE
# the NTP service is called "ntp" on Debian/Ubuntu and "ntpd" on CentOS
if [ "$DISTRO_NAME" == "CentOS" ]; then
NTP=ntpd
else
NTP=ntp
fi
#-----------------------------------------------------------------------
# Set the RTC (hardware clock) to UTC
function RtcSetUtc(){
# Switch the RTC (hardware) to UTC
hwclock --systohc --utc
# on CentOS this updated /etc/adjtime (still needs testing on Ubuntu)
if [ "$DISTRO_NAME" == "CentOS" ]; then
# Record the UTC setting in /etc/sysconfig/clock
EditConfEqual /etc/sysconfig/clock UTC yes
else
# On Ubuntu the file is /etc/default/rcS
# http://askubuntu.com/questions/115963/how-do-i-migrate-to-utc-hardware-clock
echo "Ubuntu not implemented"
fi
# Set the correct time using an NTP server
ntpdate 0.us.pool.ntp.org
}
#-----------------------------------------------------------------------
# Configure NTP
# Brazil: http://ntp.br/guia-linux-avancado.php
# USA: http://www.pool.ntp.org/zone/us
# Europe: http://www.pool.ntp.org/pt/zone/europe
function NtpConfigure(){
local NTP_ARQ=/etc/ntp.conf
# Keep a copy of the original file
if [ ! -e $NTP_ARQ.orig ]; then
cp $NTP_ARQ $NTP_ARQ.orig
chmod 600 $NTP_ARQ
fi
# stop ntpd so it can be reconfigured
service $NTP stop
# Create the drift file to enable that feature
touch /etc/ntp.drift
# the time was already synchronized when UTC was configured
#(not needed) ntpdate 0.us.pool.ntp.org
# create the configuration file
if [ ! -e $NTP_ARQ ]; then
cat <<- EOF > $NTP_ARQ
##################################################
## NTPD configuration
##################################################
## Once created, this file is not modified again
# "memory" for the automatic adjustment of the machine's clock frequency
driftfile /etc/ntp.drift
# statistics not configured
# Security: access restriction settings
restrict default kod notrap nomodify nopeer noquery
restrict -6 default kod notrap nomodify nopeer noquery
# Security: disable the monlist command
disable monitor
# public servers from the ntp.br project
server a.st1.ntp.br iburst
server b.st1.ntp.br iburst
server c.st1.ntp.br iburst
server d.st1.ntp.br iburst
server gps.ntp.br iburst
server a.ntp.br iburst
server b.ntp.br iburst
server c.ntp.br iburst
# Servers in the USA (a pool of about 600 servers)
server 0.us.pool.ntp.org iburst
server 1.us.pool.ntp.org iburst
server 2.us.pool.ntp.org iburst
server 3.us.pool.ntp.org iburst
# Servers in Europe
server 0.europe.pool.ntp.org iburst
server 1.europe.pool.ntp.org iburst
server 2.europe.pool.ntp.org iburst
server 3.europe.pool.ntp.org iburst
# Servers in Asia
server 0.asia.pool.ntp.org
server 1.asia.pool.ntp.org
server 2.asia.pool.ntp.org
server 3.asia.pool.ntp.org
EOF
fi
chmod 600 $NTP_ARQ
# restart ntpd
service $NTP start
# Informational message
MSG="\nYour RTC (hardware) clock has been set to UTC"
MSG+="\n\nThe clock will stay synchronized using NTP"
MSG+="\n Servers were configured and will be selected"
MSG+="\n automatically across 4 continents:"
MSG+="\n Brazil, USA, Europe and Asia"
whiptail --title "$TITLE" --msgbox "$MSG" 13 70
}
#-----------------------------------------------------------------------
# main()
TITLE="NFAS - Configuração do Relógio"
if [ "$CMD" == "--first" ]; then
# Altera RTC (hardware) para UTC
RtcSetUtc
# Configura NTP
NtpConfigure
# cria chamada diária
ARQ=/etc/cron.daily/nfas-clock.sh
cat <<- EOF > $ARQ
#!/bin/bash
##################################################
## Reconfiguração do RTC
##################################################
# Chama diariamente para sincronizar o RTC (hardware clock)
/script/clock.sh --daily
EOF
chmod 700 $ARQ
elif [ "$CMD" == "--daily" ]; then
#-----------------------------------------------------------------------
# Ajustes diários, sincroniza relógio de harware
# https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/System_Administrators_Guide/sect-Configuring_the_Date_and_Time-hwclock.html
hwclock --systohc
else
#-----------------------------------------------------------------------
# Ajuste interativo, só localtime
echo ""
fi
#-----------------------------------------------------------------------
|
isaacvitor/nfas
|
script/clock.sh
|
Shell
|
mit
| 5,561 |
#
# Sets grep environment variables and defines grep aliases.
#
export GREP_COLOR='37;45'
alias grep='grep --color=auto'
|
danielbayerlein/zsh-plugins
|
grep.plugin.zsh
|
Shell
|
mit
| 123 |
#!/usr/bin/env bash
# Mother daemon collects system information and five-daemon-mgmt information and reacts.
# Keep things running with the traprestart function.
#
traprestart()
{
$0 "$$" &
exit 0
}
trap traprestart HUP TERM INT
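# On HUP/TERM/INT the script re-executes itself in the background (passing
# its old PID as $1) and exits, so the daemon survives those signals.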
# Mother Daemon by Keegan Bowen, 2014
# The Mother does a lot. Checks on her children ( the other daemons )
# Mom reacts to system warnings and does some cataloging and validation...
# You will need to configure the other daemons in the five-daemon-mgmt group
# before the mother-daemon will run properly.
# /var/tmp/keeper-daemon/keeper-daemon.sh
# /var/tmp/util-daemon/util-daemon.sh
# /var/tmp/cop-daemon/cop-daemon.sh
# /var/tmp/install-daemon/install-daemon.sh
mkdir -p /var/tmp/mother-daemon/log
touch /var/tmp/mother-daemon/log/mother.log
cut -d: -f1 /etc/passwd > /var/tmp/cop-daemon/users.list
cat /var/tmp/cop-daemon/users.list
echo "Those are the users allowed by the cop-daemon."
if [ -s /var/tmp/util-daemon/email.send ]
then
echo "EMAILING IS ON."
else
echo "EMAILING IS OFF."
fi
echo "DAEMONIZING..."
checkall() {
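# For each managed daemon: prefer pgrep to find its PID; if that returns
# nothing, fall back to parsing ps output, then record the PID in a .pid file.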
coppidval=$(pgrep cop-daemon.sh)
if [ -z "$coppidval" ]; then
touch /var/tmp/mother-daemon/cop.pid
ps auxwww | grep /var/tmp/cop-daemon/cop-daemon.sh$ | awk '{print $2}' | head -n1 > /var/tmp/mother-daemon/cop.pid
else
touch /var/tmp/mother-daemon/cop.pid
echo $coppidval > /var/tmp/mother-daemon/cop.pid
fi
keeperpidval=$(pgrep keeper-daemon.sh)
if [ -z "$keeperpidval" ]; then
touch /var/tmp/mother-daemon/keeper.pid
ps auxwww | grep /var/tmp/keeper-daemon/keeper-daemon.sh$ | awk '{print $2}' | head -n1 > /var/tmp/mother-daemon/keeper.pid
else
touch /var/tmp/mother-daemon/keeper.pid
echo $keeperpidval > /var/tmp/mother-daemon/keeper.pid
fi
installpidval=$(pgrep install-daemon.sh)
if [ -z "$installpidval" ]; then
touch /var/tmp/mother-daemon/install.pid
ps auxwww | grep /var/tmp/install-daemon/install-daemon.sh$ | awk '{print $2}' | head -n1 > /var/tmp/mother-daemon/install.pid
else
touch /var/tmp/mother-daemon/install.pid
echo $installpidval > /var/tmp/mother-daemon/install.pid
fi
utilpidval=$(pgrep util-daemon.sh)
if [ -z "$utilpidval" ]; then
touch /var/tmp/mother-daemon/util.pid
ps auxwww | grep /var/tmp/util-daemon/util-daemon.sh$ | awk '{print $2}' | head -n1 > /var/tmp/mother-daemon/util.pid
else
touch /var/tmp/mother-daemon/util.pid
echo $utilpidval > /var/tmp/mother-daemon/util.pid
fi
COPID=$(cat /var/tmp/mother-daemon/cop.pid)
KEEPID=$(cat /var/tmp/mother-daemon/keeper.pid)
INSTLID=$(cat /var/tmp/mother-daemon/install.pid)
UTILID=$(cat /var/tmp/mother-daemon/util.pid)
if [[ -s /var/tmp/mother-daemon/cop.pid ]]; then
echo "COP PID is $COPID"
else
echo "COP is not running."
fi
if [[ -s /var/tmp/mother-daemon/keeper.pid ]]; then
echo "KEEPER PID is $KEEPID"
else
echo "KEEPER is not running."
fi
if [[ -s /var/tmp/mother-daemon/install.pid ]]; then
echo "INSTALL PID is $INSTLID"
else
echo "INSTALL is not running."
fi
if [[ -s /var/tmp/mother-daemon/util.pid ]]; then
echo "UTIL PID is $UTILID"
else
echo "UTIL is not running."
fi
}
sanitycheck() {
date > /var/tmp/mother-daemon/log/sanity.log
find / -print0 | xargs -0 stat -c '%s %n' >> /var/tmp/mother-daemon/log/sanity.log
SESH=$(date +"%m-%d-%y-%s")
tar czvf /var/tmp/mother-daemon/log/sanity."$SESH".tar.gz /var/tmp/keeper-daemon/ &&
# Uncomment and add an archive location:
# scp /var/tmp/mother-daemon/sanity."$SESH".tar.gz /mnt/archive/location
tar czvf /var/tmp/mother-daemon/sanity.catalog."$SESH".tar.gz /var/tmp/mother-daemon/log/sanity.log &&
# Uncomment and add an archive location:
# scp /var/tmp/mother-daemon/sanity.catalog."$SESH".tar.gz /mnt/archive/location
echo "Catalogs have been archived..."
}
tcpkill() {
cat /var/tmp/util-daemon/netstat.out >> /var/log/tcpkill.dat
}
warnresponse() {
df -h | grep "100%" > /var/tmp/mother-daemon/full.trigger
if [[ -s "/var/tmp/util-daemon/blacklist.warn" ]]; then
netstat -a | grep $(cat /var/tmp/util-daemon/blacklist*) | grep -v grep | cut -d' ' -f7; tcpkill
else
echo "No warning response triggered."
fi
}
alertresponse() {
df -h | grep "100%" > /var/tmp/mother-daemon/full.trigger
if [[ -s "/var/tmp/mother-daemon/full.trigger" ]]; then
DUTARGET=$(df -h | grep 100% | rev | cut -d'%' -f1 | rev )
DUOPEN1=$(df -h | grep [0-6][0-9]% | rev | cut -d'%' -f1 | rev )
DUOPEN2=$(df -h | grep [0-9]% | rev | cut -d'%' -f1 | rev )
cp /dev/null "$DUTARGET"/*.log &
cp /dev/null "$DUTARGET"/*/*.log &
cp /dev/null "$DUTARGET"/*/*/*.log &
cp /dev/null "$DUTARGET"/*/*/*/*.log &
cp /dev/null "$DUTARGET"/*/*/*/*/*.log &
cp /dev/null "$DUTARGET"/*/*/*/*/*/*.log &
cp /dev/null /var/tmp/mother-daemon/mother.log
mkdir -p ${DUOPEN1}/tmp-storage/
mkdir -p ${DUOPEN2}/tmp-storage/
else
echo "No alert response triggered."
fi
}
warntrig() {
for warn in /var/tmp/util-daemon/log/*warn; do
echo "$warn" >> /var/tmp/mother-daemon/log/mother.log
warnresponse
done
for alert in /var/tmp/util-daemon/log/*alert; do
echo "$alert" >> /var/tmp/mother-daemon/log/mother.log
alertresponse
done
}
# And now the deep dark loop
sanitycheck
while true; do
warntrig
checkall
sleep $(cat /var/tmp/mother-daemon/mother.sleep)
done
|
jpegleg/five-daemon-mgmt
|
mother-daemon.sh
|
Shell
|
mit
| 5,591 |
#!/usr/bin/env bash
parallelshell 'tools/start.sh' 'tools/watch.sh' 'tools/browser-sync.sh'
|
bbviana/alexandria
|
tools/dev.sh
|
Shell
|
mit
| 93 |
#!/bin/bash
set -xe
mysqladmin -uroot -hdb create hakoblog || :
cat db/schema.sql | mysql -uroot -hdb hakoblog
mysqladmin -uroot -hdb create hakoblog_test || :
cat db/schema.sql | mysql -uroot -hdb hakoblog_test
|
hakobe/hakoblog-python
|
prepare-db.sh
|
Shell
|
mit
| 215 |
#!/bin/bash
# install openvpn
apt-get install openvpn
|
danielfaulknor/ibd-packer
|
packer/scripts/debian/openvpn.sh
|
Shell
|
mit
| 55 |
p 'g++ -S hello.cxx'
p 'c++filt -n <hello.s |fold'
|
osamuaoki/fun2prog
|
hello/cxx/hello-s.sh
|
Shell
|
mit
| 51 |
#!/bin/sh
###
sed 's/_/\./g' ../temp/jur.h.amp > ../temp/jur.h.1
sed 's/_/\./g' ../temp/jur.d.amp > ../temp/jur.d.1
sed 's/\\/ \\textbackslash /g' ../temp/jur.d.1 > ../temp/jur.d.2
sed 's/\\/ \\textbackslash /g' ../temp/jur.h.1 > ../temp/jur.h.2
sed 's/$/ \\\\ /g' ../temp/jur.d.2 > ../temp/jur.d.bs
sed 's/$/ \\\\ /g' ../temp/jur.h.2 > ../temp/jur.h.bs
###
sed 's/_/\./g' ../temp/medical.h.amp > ../temp/medical.h.1
sed 's/_/\./g' ../temp/medical.d.amp > ../temp/medical.d.1
sed 's/\\/ \\textbackslash /g' ../temp/medical.d.1 > ../temp/medical.d.2
sed 's/\\/ \\textbackslash /g' ../temp/medical.h.1 > ../temp/medical.h.2
sed 's/$/ \\\\ /g' ../temp/medical.d.2 > ../temp/medical.d.bs
sed 's/$/ \\\\ /g' ../temp/medical.h.2 > ../temp/medical.h.bs
###
sed 's/_/\./g' ../temp/mpfr.h.amp > ../temp/mpfr.h.1
sed 's/_/\./g' ../temp/mpfr.d.amp > ../temp/mpfr.d.1
sed 's/\\/ \\textbackslash /g' ../temp/mpfr.d.1 > ../temp/mpfr.d.2
sed 's/\\/ \\textbackslash /g' ../temp/mpfr.h.1 > ../temp/mpfr.h.2
sed 's/$/ \\\\ /g' ../temp/mpfr.d.2 > ../temp/mpfr.d.bs
sed 's/$/ \\\\ /g' ../temp/mpfr.h.2 > ../temp/mpfr.h.bs
###
sed 's/_/\./g' ../temp/multi.h.amp > ../temp/multi.h.1
sed 's/_/\./g' ../temp/multi.d.amp > ../temp/multi.d.1
sed 's/\\/ \\textbackslash /g' ../temp/multi.d.1 > ../temp/multi.d.2
sed 's/\\/ \\textbackslash /g' ../temp/multi.h.1 > ../temp/multi.h.2
sed 's/$/ \\\\ /g' ../temp/multi.d.2 > ../temp/multi.d.bs
sed 's/$/ \\\\ /g' ../temp/multi.h.2 > ../temp/multi.h.bs
###
sed 's/_/\./g' ../temp/short_1d21.h.amp > ../temp/short_1d21.h.1
sed 's/_/\./g' ../temp/short_1d21.d.amp > ../temp/short_1d21.d.1
sed 's/\\/ \\textbackslash /g' ../temp/short_1d21.d.1 > ../temp/short_1d21.d.2
sed 's/\\/ \\textbackslash /g' ../temp/short_1d21.h.1 > ../temp/short_1d21.h.2
sed 's/$/ \\\\ /g' ../temp/short_1d21.d.2 > ../temp/short_1d21.d.bs
sed 's/$/ \\\\ /g' ../temp/short_1d21.h.2 > ../temp/short_1d21.h.bs
|
michal-fre/refugee-phrasebook.github.io
|
bash-scripts-for-pdf-generation/scripts/07_replace_shxxx.sh
|
Shell
|
mit
| 2,349 |
#!/bin/bash
# http://www.gnu.org/software/bash/manual/bash.html#Bash-Conditional-Expressions
# git status --porcelain | grep "^A" | cut -c 4-
log_info () { echo "--- INF ---: $1"; }
log_error () { echo "--- ERR ---: $1"; }
# PROJECT_PATH=$(pwd)
PROJECT_PATH=$1
SKIP_COMMIT=1
# a function instead of an alias: aliases are not expanded in non-interactive scripts
git() { command git -C "$PROJECT_PATH" "$@"; }
log_info "current project: $PROJECT_PATH"
if [[ -z $(git status 2>/dev/null) ]]; then
log_info "not a git repository"
else
# current branch
# work with git v1.7+
CURRENT_BRANCH=`git rev-parse --abbrev-ref HEAD`
# work with git v1.8+
# git symbolic-ref --short HEAD
log_info "current branch: $CURRENT_BRANCH"
if [[ -z `git status --porcelain` ]]; then
log_info "nothing to commit"
git pull origin $CURRENT_BRANCH
exit
fi
if [[ $SKIP_COMMIT -eq 1 ]]; then
N_TITLE="$PROJECT_PATH"
N_CONTENT="branch $CURRENT_BRANCH has uncommitted files."
[[ $(uname) = 'Darwin' ]] && \
osascript -e "display notification \"$N_CONTENT\" with title \"$N_TITLE\""
exit
fi
# add all
git add --all
# commit
git commit -m "[AUTO COMMIT] $(date +'%Y-%m-%d %T')"
# auto push
git push origin $CURRENT_BRANCH
fi
|
xieyunzi/dotfiles
|
crontab/.bin/auto_commit.sh
|
Shell
|
mit
| 1,170 |
#!/bin/sh
#
# geninfocontrib_h.sh - infocontrib.h generator script
#
# written by Marco van den Heuvel <[email protected]>
# use system echo as it supports backslash expansion
ECHO=/bin/echo
rm -f try.tmp
$ECHO "\\\\n" >try.tmp
n1=`cat try.tmp | wc -c`
n2=`expr $n1 + 0`
if test x"$n2" = "x3"; then
linefeed="\\\\n"
else
linefeed="\\n"
fi
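# The probe above pushes "\\\\n" through $ECHO: if only 3 bytes come back
# (backslash, "n", newline), this echo interprets backslash escapes, so the
# doubled form is needed to emit a literal "\n" into the generated header.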
$ECHO "/*"
$ECHO " * infocontrib.h - Text of contributors to VICE, as used in info.c"
$ECHO " *"
$ECHO " * Autogenerated by geninfocontrib_h.sh, DO NOT EDIT !!!"
$ECHO " *"
$ECHO " * Written by"
$ECHO " * Marco van den Heuvel <[email protected]>"
$ECHO " *"
$ECHO " * This file is part of VICE, the Versatile Commodore Emulator."
$ECHO " * See README for copyright notice."
$ECHO " *"
$ECHO " * This program is free software; you can redistribute it and/or modify"
$ECHO " * it under the terms of the GNU General Public License as published by"
$ECHO " * the Free Software Foundation; either version 2 of the License, or"
$ECHO " * (at your option) any later version."
$ECHO " *"
$ECHO " * This program is distributed in the hope that it will be useful,"
$ECHO " * but WITHOUT ANY WARRANTY; without even the implied warranty of"
$ECHO " * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the"
$ECHO " * GNU General Public License for more details."
$ECHO " *"
$ECHO " * You should have received a copy of the GNU General Public License"
$ECHO " * along with this program; if not, write to the Free Software"
$ECHO " * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA"
$ECHO " * 02111-1307 USA."
$ECHO " *"
$ECHO " */"
$ECHO ""
$ECHO "#ifndef VICE_INFOCONTRIB_H"
$ECHO "#define VICE_INFOCONTRIB_H"
$ECHO ""
$ECHO "const char info_contrib_text[] ="
checkoutput()
{
dooutput=yes
case "$data" in
@c*|"@itemize @bullet"|@item|"@end itemize") dooutput=no ;;
esac
}
outputok=no
while read data
do
if test x"$data" = "x@node Copyright, Contacts, Acknowledgments, Top"; then
$ECHO "\"$linefeed\";"
$ECHO "#endif"
outputok=no
fi
if test x"$outputok" = "xyes"; then
checkoutput
if test x"$dooutput" = "xyes"; then
if test x"$data" = "x"; then
$ECHO "\"$linefeed\""
else
$ECHO "\" $data$linefeed\""
fi
fi
fi
if test x"$data" = "x@chapter Acknowledgments"; then
outputok=yes
fi
done
|
century-arcade/src
|
c64/vice-2.4/src/geninfocontrib_h.sh
|
Shell
|
mit
| 2,359 |
#!/bin/bash
# Copyright (C) 2016-2017 Claudio Luck <[email protected]>
#
# MIT License
export LANG=C
export LC_ALL=C
function on_exit {
[ -z "$NF" -o ! -e "$NF" ] || rm -f "$NF"
[ -z "$NC" -o ! -e "$NC" ] || rm -f "$NC"
}
trap on_exit EXIT
NF=$(mktemp)
NC=$(mktemp)
mkdir -p /etc/nginx/proxy-trusts.pem.d
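# Iterate NUL-delimited over the PEM files (find -print0 below feeds
# read -d $'\0'), so file names with spaces or newlines are handled safely.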
while read -d $'\0' PEM ; do
HN=$(basename $(basename "$PEM" .pem) .cert)
openssl s_client -showcerts -connect "$HN":443 -CAfile "$PEM" >$NF 2>&1 </dev/null
sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' $NF >$NC
TEST=$(grep 'Verify return code' $NF | head -n1)
echo "$HN: $TEST"
if [ -s $NC ] && ! cmp --silent $NC $PEM ; then
echo "$HN: Certificate has changed!"
cp -f $NC ${PEM}.new
elif [ ! -s $NC ] ; then
cat $NF
fi
done < <(find /etc/nginx/proxy-trusts.pem.d/ -name \*.pem -print0)
|
cluck/zeteco-infra
|
pastebin/loadbalancer/CHECK_proxy-trusts.sh
|
Shell
|
mit
| 875 |
# Meteor packages used by this project, one per line.
#
# 'meteor add' and 'meteor remove' will edit this file for you,
# but you can also edit it by hand.
# Meteor Packages
meteor add standard-app-packages
meteor add backbone
meteor add accounts-base
meteor add accounts-password
meteor add accounts-twitter
meteor add spiderable
meteor add email
meteor add accounts-facebook
meteor add service-configuration
meteor add accounts-ui
meteor add reactive-var
meteor add http
# Atmosphere Packages
meteor add iron:router
meteor add meteorhacks:fast-render
meteor add meteorhacks:subs-manager
meteor add meteorhacks:npm
meteor add aldeed:autoform
meteor add aldeed:collection2
meteor add aldeed:simple-schema
meteor add mrt:jquery-hotkeys
meteor add mrt:cookies
meteor add ccan:cssreset # CSS reset (Must come before any other css)
meteor add cmather:handlebars-server
meteor add sacha:spin
meteor add sacha:juice
meteor add chuangbo:marked
meteor add percolatestudio:synced-cron
meteor add npm-container
meteor add matb33:collection-hooks
meteor add djedi:sanitize-html
meteor add rajit:bootstrap3-datepicker
meteor add bengott:avatar
meteor add jparker:gravatar
meteor add tap:i18n
meteor add useraccounts:unstyled
meteor add tsega:bootstrap3-datetimepicker_1
meteor add momentjs:moment
meteor add aslagle:reactive-table
# Testing
# sanjo:jasmine
# velocity:html-reporter
# Telescope Packages (Required)
meteor add telescope-base
meteor add telescope-lib
meteor add telescope-i18n
# Telescope Packages (Optional)
meteor add telescope-datetimepicker
meteor add telescope-theme-base
meteor add telescope-module-share
meteor add telescope-api
meteor add telescope-rss
meteor add telescope-search
meteor add telescope-tags
meteor add telescope-theme-hubble
meteor add telescope-email
meteor add telescope-embedly
meteor add telescope-newsletter
meteor add telescope-daily
meteor add telescope-update-prompt
meteor add telescope-kadira
meteor add telescope-notifications
meteor add telescope-singleday
meteor add telescope-invites
meteor add telescope-post-by-feed
meteor add telescope-releases
meteor add telescope-getting-started
meteor add telescope-subscribe-to-posts
meteor add telescope-tagline-banner
# Custom Packages
|
theartsnetwork/artytrends
|
packages-add.sh
|
Shell
|
mit
| 2,234 |
git clone https://github.com/probmods/webppl.git ../webppl
git clone https://github.com/stuhlmueller/webppl-timeit.git ../webppl-timeit
git clone https://github.com/stuhlmueller/webppl-dp.git ../webppl-dp
git clone https://github.com/probmods/webppl-viz ../webppl-viz
npm link ../webppl-timeit
npm link ../webppl-dp
npm link ../webppl-viz
npm link ../webppl
npm install grunt uglify browserify
npm install -g grunt-cli
npm install
|
jsalvatier/webppl-agents
|
install.sh
|
Shell
|
mit
| 435 |
#!/usr/bin/env bash
cd "$(dirname "${BASH_SOURCE}")"
# echo "Updating local 'dotfiles' workspace via git ..."
# git pull origin master || exit 1
function doIt() {
echo "Copying local 'dotfiles' to home directory ..."
rsync --exclude ".git/" --exclude ".DS_Store" --exclude "install.sh" \
--exclude "README.md" --exclude "LICENSE" -av --no-perms . ~
source ~/.bash_profile
}
if [ "$1" == "--force" -o "$1" == "-f" ]; then
doIt
else
read -p "This may overwrite existing files in your home directory. Are you sure? (y/n) " -n 1
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
doIt
fi
fi
unset doIt
|
stevenbaker/dotfiles
|
install.sh
|
Shell
|
mit
| 607 |
#!/usr/bin/env bash
###############################################################################
# Displays
###############################################################################
# Automatically adjust brightness
defaults write com.apple.BezelServices dAuto -bool true
sudo defaults write /Library/Preferences/com.apple.iokit.AmbientLightSensor "Automatic Display Enabled" -bool true
# Subpixel font rendering on non-Apple LCDs
# 0 : Disabled
# 1 : Minimal
# 2 : Medium
# 3 : Smoother
# 4 : Strong
defaults write NSGlobalDomain AppleFontSmoothing -int 2
# Enable HiDPI display modes (requires restart)
sudo defaults write /Library/Preferences/com.apple.windowserver DisplayResolutionEnabled -bool true
# Show mirroring options in the menu bar when available
defaults write com.apple.airplay showInMenuBarIfPresent -bool true
|
joeyhoer/starter
|
system/displays.sh
|
Shell
|
mit
| 842 |
function peco-pkill() {
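# ps aux is piped through peco for interactive selection; awk extracts the
# PID column from each chosen line.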
for pid in $(ps aux | peco --prompt "[peco-pkill]" | awk '{ print $2 }'); do
kill $pid
echo "Killed ${pid}"
done
}
alias pk="peco-pkill"
|
togatoga/dotfiles
|
.peco/peco-pkill.zsh
|
Shell
|
mit
| 167 |
#!/bin/sh -x
# http://blog.altermundi.net/article/playing-with-ath9k-spectral-scan/
# https://github.com/simonwunderlich/FFT_eval
phy=phy0
dev=wlan0
dbg=/sys/kernel/debug/ieee80211/$phy/ath9k
tmp=/tmp/fft_$$
if [ $(id -u) -eq 0 ]; then
if ! [ -d "$dbg" ]; then
echo "Missing $dbg, you need CONFIG_ATH9K_DEBUGFS=y" >&2
exit 1
fi
echo 'chanscan' > $dbg/spectral_scan_ctl
iw $dev scan
cat $dbg/spectral_scan0 > "$1"
echo 'disable' > $dbg/spectral_scan_ctl
else
touch "$tmp"
if sudo "$0" "$tmp" > /dev/null; then
(cd ~/src/misc/FFT_eval && ./fft_eval "$tmp")
#(cd ~/src/misc/ath_spectral/UI &&
# LD_LIBRARY_PATH=qwt/lib athScan/athScan "$tmp")
fi
fi
|
grawity/bin
|
spectral-scan.sh
|
Shell
|
mit
| 668 |
#!/usr/bin/env bash
echo '
------------------------------------------------------------
LOKRA STUDIO
- Project: ui-starter
- Description: build script
------------------------------------------------------------
'
rm -rf dist/
echo '
------------------------------------------------------------
Task: Build Product
------------------------------------------------------------
'
./node_modules/.bin/ng build --target=production --environment=prod --aot=true --sourcemap=false
echo '
------------------------------------------------------------
Task: Build Zip File
------------------------------------------------------------
'
cd ..
rm -rf .ui-starter.tmp/
mkdir .ui-starter.tmp
cp -R ui-starter .ui-starter.tmp/
cd .ui-starter.tmp/ui-starter/
rm -rf ./node_modules/ ./.idea/ ./.git/ build.sh ./dist/
cd ..
zip -qr ./ui-starter.zip ./*
mv ./ui-starter.zip ../
cd ..
rm -rf .ui-starter.tmp/
cd ui-starter/
echo '
------------------------------------------------------------
Finished
------------------------------------------------------------
'
|
marulinho/Proyecto-Angular-Final
|
build.sh
|
Shell
|
mit
| 1,047 |
#!/bin/bash
docker build -t nodespeed-ide ..
|
whoGloo/nodespeed-ide
|
docker/make.sh
|
Shell
|
mit
| 46 |
docker-compose down && docker-compose up --build -d && docker-compose ps
|
idchlife/typescript-project-starter-1
|
start-app.sh
|
Shell
|
mit
| 72 |
#!/bin/bash
#
# Run the epan unit tests
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
wslua_step_dir_test() {
if [ $HAVE_LUA -ne 0 ]; then
test_step_skipped
return
fi
# Tshark catches lua script failures, so we have to parse the output.
$TSHARK -r $CAPTURE_DIR/empty.pcap -X lua_script:$TESTS_DIR/lua/dir.lua > testout.txt 2>&1
if grep -q "All tests passed!" testout.txt; then
test_step_ok
else
cat testout.txt
test_step_failed "didn't find pass marker"
fi
}
wslua_step_dissector_test() {
if [ $HAVE_LUA -ne 0 ]; then
test_step_skipped
return
fi
# First run tshark with the dissector script.
$TSHARK -r $CAPTURE_DIR/dns_port.pcap -V -X lua_script:$TESTS_DIR/lua/dissector.lua > testin.txt 2>&1
RETURNVALUE=$?
if [ ! $RETURNVALUE -eq $EXIT_OK ]; then
echo
cat ./testin.txt
test_step_failed "subtest-1 exit status of $DUT: $RETURNVALUE"
return
fi
# then run tshark again with the verification script. (it internally reads in testin.txt)
$TSHARK -r $CAPTURE_DIR/empty.pcap -X lua_script:$TESTS_DIR/lua/verify_dissector.lua > testout.txt 2>&1
grep -q "All tests passed!" testout.txt
if [ $? -ne 0 ]; then
cat ./testin.txt
cat ./testout.txt
test_step_failed "subtest-1 didn't find pass marker"
fi
# run tshark with the dissector script again, but in mode 2.
$TSHARK -r $CAPTURE_DIR/dns_port.pcap -V -X lua_script:$TESTS_DIR/lua/dissector.lua -X lua_script1:heur_regmode=2 > testin.txt 2>&1
RETURNVALUE=$?
if [ ! $RETURNVALUE -eq $EXIT_OK ]; then
echo
cat ./testin.txt
test_step_failed "subtest-1 exit status of $DUT: $RETURNVALUE"
return
fi
# then run tshark again with the verification script. (it internally reads in testin.txt)
$TSHARK -r $CAPTURE_DIR/empty.pcap -X lua_script:$TESTS_DIR/lua/verify_dissector.lua -X lua_script1:no_heur > testout.txt 2>&1
grep -q "All tests passed!" testout.txt
if [ $? -ne 0 ]; then
cat ./testin.txt
cat ./testout.txt
test_step_failed "subtest-1 didn't find pass marker"
fi
# run tshark with the dissector script again, but in mode 3.
$TSHARK -r $CAPTURE_DIR/dns_port.pcap -V -X lua_script:$TESTS_DIR/lua/dissector.lua -X lua_script1:heur_regmode=3 > testin.txt 2>&1
RETURNVALUE=$?
if [ ! $RETURNVALUE -eq $EXIT_OK ]; then
echo
cat ./testin.txt
test_step_failed "subtest-1 exit status of $DUT: $RETURNVALUE"
return
fi
# then run tshark again with the verification script. (it internally reads in testin.txt)
$TSHARK -r $CAPTURE_DIR/empty.pcap -X lua_script:$TESTS_DIR/lua/verify_dissector.lua -X lua_script1:no_heur > testout.txt 2>&1
if grep -q "All tests passed!" testout.txt; then
test_step_ok
else
echo
cat ./testin.txt
cat ./testout.txt
test_step_failed "didn't find pass marker"
fi
}
wslua_step_field_test() {
if [ $HAVE_LUA -ne 0 ]; then
test_step_skipped
return
fi
# Tshark catches lua script failures, so we have to parse the output.
$TSHARK -r $CAPTURE_DIR/dhcp.pcap -X lua_script:$TESTS_DIR/lua/field.lua > testout.txt 2>&1
if grep -q "All tests passed!" testout.txt; then
test_step_ok
else
cat testout.txt
test_step_failed "didn't find pass marker"
fi
}
wslua_step_file_test() {
if [ $HAVE_LUA -ne 0 ]; then
test_step_skipped
return
fi
# First run tshark with the pcap_file_reader script.
$TSHARK -r $CAPTURE_DIR/dhcp.pcap -X lua_script:$TESTS_DIR/lua/pcap_file.lua > testin.txt 2>&1
$TSHARK -r $CAPTURE_DIR/wpa-Induction.pcap.gz -X lua_script:$TESTS_DIR/lua/pcap_file.lua >> testin.txt 2>&1
RETURNVALUE=$?
if [ ! $RETURNVALUE -eq $EXIT_OK ]; then
echo
cat ./testin.txt
test_step_failed "subtest-1 exit status of $DUT: $RETURNVALUE"
return
fi
# then run tshark again without the script
$TSHARK -r $CAPTURE_DIR/dhcp.pcap > testout.txt 2>&1
$TSHARK -r $CAPTURE_DIR/wpa-Induction.pcap.gz >> testout.txt 2>&1
RETURNVALUE=$?
if [ ! $RETURNVALUE -eq $EXIT_OK ]; then
echo
cat ./testout.txt
test_step_failed "subtest-2 exit status of $DUT: $RETURNVALUE"
return
fi
# now compare the two files - they should be identical
if diff -q ./testin.txt ./testout.txt; then
rm ./testin.txt
else
echo
cat ./testin.txt
cat ./testout.txt
test_step_failed "subtest-3 reading the pcap file with Lua did not match internal"
return
fi
# Now generate a new capture file using the Lua writer.
$TSHARK -r $CAPTURE_DIR/dhcp.pcap -X lua_script:$TESTS_DIR/lua/pcap_file.lua -w testin.txt -F lua_pcap2 > testout.txt 2>&1
RETURNVALUE=$?
if [ ! $RETURNVALUE -eq $EXIT_OK ]; then
echo
cat ./testout.txt
test_step_failed "subtest-4 exit status of $DUT: $RETURNVALUE"
return
fi
# now compare the two files - they should be identical
if diff -q $CAPTURE_DIR/dhcp.pcap ./testin.txt; then
rm ./testin.txt
else
echo
cat ./testout.txt
test_step_failed "subtest-5 creating a new pcap file using Lua did not match dhcp.cap"
return
fi
# Now read an acme sipmsg.log using the acme Lua reader, writing it out as pcapng.
$TSHARK -r $CAPTURE_DIR/sipmsg.log -X lua_script:$TESTS_DIR/lua/acme_file.lua -w testin.txt -F pcapng > testout.txt 2>&1
RETURNVALUE=$?
if [ ! $RETURNVALUE -eq $EXIT_OK ]; then
echo
cat ./testout.txt
test_step_failed "subtest-6 exit status of $DUT: $RETURNVALUE"
return
fi
# testin.txt is now a pcapng, read it out using -V verbose into testout.txt
$TSHARK -r ./testin.txt -V > testout.txt 2>&1
RETURNVALUE=$?
if [ ! $RETURNVALUE -eq $EXIT_OK ]; then
echo
cat ./testout.txt
test_step_failed "subtest-7 exit status of $DUT: $RETURNVALUE"
return
fi
# now readout sip.pcapng into testin.txt using -V verbose
$TSHARK -r $CAPTURE_DIR/sip.pcapng -V > testin.txt 2>&1
RETURNVALUE=$?
if [ ! $RETURNVALUE -eq $EXIT_OK ]; then
echo
cat ./testin.txt
test_step_failed "subtest-8 exit status of $DUT: $RETURNVALUE"
return
fi
# now compare testin and testout - they should be identical
if diff -q ./testout.txt ./testin.txt; then
test_step_ok
else
echo
cat ./testout.txt
diff ./testout.txt ./testin.txt
test_step_failed "subtest-9 writing the acme sipmsg.log out as pcapng did not match sip.pcapng"
fi
}
wslua_step_listener_test() {
if [ $HAVE_LUA -ne 0 ]; then
test_step_skipped
return
fi
# Tshark catches lua script failures, so we have to parse the output.
$TSHARK -r $CAPTURE_DIR/dhcp.pcap -X lua_script:$TESTS_DIR/lua/listener.lua > testout.txt 2>&1
if grep -q "All tests passed!" testout.txt; then
test_step_ok
else
cat testout.txt
test_step_failed "didn't find pass marker"
fi
}
wslua_step_nstime_test() {
if [ $HAVE_LUA -ne 0 ]; then
test_step_skipped
return
fi
# Tshark catches lua script failures, so we have to parse the output.
$TSHARK -r $CAPTURE_DIR/dhcp.pcap -X lua_script:$TESTS_DIR/lua/nstime.lua > testout.txt 2>&1
if grep -q "All tests passed!" testout.txt; then
test_step_ok
else
cat testout.txt
test_step_failed "didn't find pass marker"
fi
}
wslua_step_pinfo_test() {
if [ $HAVE_LUA -ne 0 ]; then
test_step_skipped
return
fi
# Tshark catches lua script failures, so we have to parse the output.
$TSHARK -r $CAPTURE_DIR/dhcp.pcap -X lua_script:$TESTS_DIR/lua/pinfo.lua > testout.txt 2>&1
if grep -q "All tests passed!" testout.txt; then
test_step_ok
else
cat testout.txt
test_step_failed "didn't find pass marker"
fi
}
wslua_step_proto_test() {
if [ $HAVE_LUA -ne 0 ]; then
test_step_skipped
return
fi
# First run tshark with the proto script.
$TSHARK -r $CAPTURE_DIR/dns_port.pcap -V -X lua_script:$TESTS_DIR/lua/proto.lua > testin.txt 2>&1
grep -q "All tests passed!" testin.txt
if [ $? -ne 0 ]; then
cat ./testin.txt
test_step_failed "didn't find pass marker"
fi
# then run tshark again with the verification script. (it internally reads in testin.txt)
$TSHARK -r $CAPTURE_DIR/empty.pcap -X lua_script:$TESTS_DIR/lua/verify_dissector.lua > testout.txt 2>&1
if grep -q "All tests passed!" testout.txt; then
test_step_ok
else
echo
cat ./testin.txt
cat ./testout.txt
test_step_failed "didn't find pass marker"
fi
}
wslua_step_int64_test() {
if [ $HAVE_LUA -ne 0 ]; then
test_step_skipped
return
fi
# Tshark catches lua script failures, so we have to parse the output.
$TSHARK -r $CAPTURE_DIR/empty.pcap -X lua_script:$TESTS_DIR/lua/int64.lua > testout.txt 2>&1
if grep -q "All tests passed!" testout.txt; then
test_step_ok
else
echo
cat ./testout.txt
test_step_failed "didn't find pass marker"
fi
}
wslua_step_args_test() {
if [ $HAVE_LUA -ne 0 ]; then
test_step_skipped
return
fi
# Tshark catches lua script failures, so we have to parse the output.
$TSHARK -r $CAPTURE_DIR/empty.pcap -X lua_script:$TESTS_DIR/lua/script_args.lua -X lua_script1:1 > testout.txt 2>&1
grep -q "All tests passed!" testout.txt
if [ $? -ne 0 ]; then
cat testout.txt
test_step_failed "lua_args_test test 1 failed"
fi
$TSHARK -r $CAPTURE_DIR/empty.pcap -X lua_script:$TESTS_DIR/lua/script_args.lua -X lua_script1:3 -X lua_script1:foo -X lua_script1:bar > testout.txt 2>&1
grep -q "All tests passed!" testout.txt
if [ $? -ne 0 ]; then
cat testout.txt
test_step_failed "lua_args_test test 2 failed"
fi
$TSHARK -r $CAPTURE_DIR/empty.pcap -X lua_script:$TESTS_DIR/lua/script_args.lua -X lua_script:$TESTS_DIR/lua/script_args.lua -X lua_script1:3 -X lua_script2:1 -X lua_script1:foo -X lua_script1:bar > testout.txt 2>&1
grep -q "All tests passed!" testout.txt
if [ $? -ne 0 ]; then
cat testout.txt
test_step_failed "lua_args_test test 3 failed"
fi
$TSHARK -r $CAPTURE_DIR/empty.pcap -X lua_script:$TESTS_DIR/lua/script_args.lua > testout.txt 2>&1
if grep -q "All tests passed!" testout.txt; then
cat testout.txt
test_step_failed "lua_args_test negative test 4 failed"
fi
$TSHARK -r $CAPTURE_DIR/empty.pcap -X lua_script:$TESTS_DIR/lua/script_args.lua -X lua_script1:3 > testout.txt 2>&1
if grep -q "All tests passed!" testout.txt; then
cat testout.txt
test_step_failed "lua_args_test negative test 5 failed"
fi
test_step_ok
}
wslua_step_globals_test() {
if [ $HAVE_LUA -ne 0 ]; then
test_step_skipped
return
fi
# Tshark catches lua script failures, so we have to parse the output.
$TSHARK -r $CAPTURE_DIR/empty.pcap -X lua_script:$TESTS_DIR/lua/verify_globals.lua -X lua_script1:$TESTS_DIR/lua/ -X lua_script1:$TESTS_DIR/lua/globals_1.8.txt > testout.txt 2>&1
grep -q "All tests passed!" testout.txt
if [ $? -ne 0 ]; then
cat testout.txt
test_step_failed "lua_globals_test test 1 failed"
fi
$TSHARK -r $CAPTURE_DIR/empty.pcap -X lua_script:$TESTS_DIR/lua/verify_globals.lua -X lua_script1:$TESTS_DIR/lua/ -X lua_script1:$TESTS_DIR/lua/globals_1.10.txt > testout.txt 2>&1
grep -q "All tests passed!" testout.txt
if [ $? -ne 0 ]; then
cat testout.txt
test_step_failed "lua_globals_test test 2 failed"
fi
test_step_ok
}
wslua_step_gregex_test() {
if [ $HAVE_LUA -ne 0 ]; then
test_step_skipped
return
fi
# Tshark catches lua script failures, so we have to parse the output.
$TSHARK -r $CAPTURE_DIR/empty.pcap -X lua_script:$TESTS_DIR/lua/gregex.lua -X lua_script1:-d$TESTS_DIR/lua/ -X lua_script1:glib -X lua_script1:-V > testout.txt 2>&1
if grep -q "All tests passed!" testout.txt; then
test_step_ok
else
cat testout.txt
test_step_failed "didn't find pass marker"
fi
}
wslua_step_struct_test() {
if [ $HAVE_LUA -ne 0 ]; then
test_step_skipped
return
fi
# Tshark catches lua script failures, so we have to parse the output.
$TSHARK -r $CAPTURE_DIR/empty.pcap -X lua_script:$TESTS_DIR/lua/struct.lua > testout.txt 2>&1
if grep -q "All tests passed!" testout.txt; then
test_step_ok
else
cat testout.txt
test_step_failed "didn't find pass marker"
fi
}
wslua_step_tvb_test() {
if [ $HAVE_LUA -ne 0 ]; then
test_step_skipped
return
fi
# Tshark catches lua script failures, so we have to parse the output.
# perform this twice: once with a tree, once without
$TSHARK -r $CAPTURE_DIR/dns_port.pcap -X lua_script:$TESTS_DIR/lua/tvb.lua -V > testout.txt 2>&1
grep -q "All tests passed!" testout.txt
if [ $? -ne 0 ]; then
cat testout.txt
test_step_failed "lua_args_test test 1 failed"
fi
$TSHARK -r $CAPTURE_DIR/dns_port.pcap -X lua_script:$TESTS_DIR/lua/tvb.lua > testout.txt 2>&1
if grep -q "All tests passed!" testout.txt; then
test_step_ok
else
cat testout.txt
test_step_failed "didn't find pass marker"
fi
}
wslua_cleanup_step() {
rm -f ./testout.txt
rm -f ./testin.txt
}
wslua_suite() {
test_step_set_pre wslua_cleanup_step
test_step_set_post wslua_cleanup_step
test_step_add "wslua dir" wslua_step_dir_test
test_step_add "wslua dissector" wslua_step_dissector_test
test_step_add "wslua field/fieldinfo" wslua_step_field_test
test_step_add "wslua file" wslua_step_file_test
test_step_add "wslua globals" wslua_step_globals_test
test_step_add "wslua gregex" wslua_step_gregex_test
test_step_add "wslua int64" wslua_step_int64_test
test_step_add "wslua listener" wslua_step_listener_test
test_step_add "wslua nstime" wslua_step_nstime_test
test_step_add "wslua pinfo" wslua_step_pinfo_test
test_step_add "wslua proto/protofield" wslua_step_proto_test
test_step_add "wslua script arguments" wslua_step_args_test
test_step_add "wslua struct" wslua_step_struct_test
test_step_add "wslua tvb" wslua_step_tvb_test
}
#
# Editor modelines - http://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 8
# tab-width: 8
# indent-tabs-mode: t
# End:
#
# vi: set shiftwidth=8 tabstop=8 noexpandtab:
# :indentSize=8:tabSize=8:noTabs=false:
#
|
zonque/wireshark
|
test/suite-wslua.sh
|
Shell
|
gpl-2.0
| 14,230 |
# System Settings
export PATH="/bin:/sbin:/usr/bin:/usr/sbin"
export HOSTNAME=$(cat /etc/hostname)
PS1='\[\e[0;32m\]\u\[\e[m\] \[\e[1;34m\]\w\[\e[m\] \[\e[1;32m\]\$\[\e[m\] \[\e[1;37m\]'
export PS1
[ -r /etc/locale.conf ] && . /etc/locale.conf && export LANG
TERM="linux"
export TERM
alias ll='ls -al'
alias stop="systemctl stop"
alias start="systemctl start"
alias restart="systemctl restart"
alias status="systemctl status"
loadkmap < /etc/it.bmap
|
unixer/sifbox-rc1
|
packages/busybox/profile.d/system.sh
|
Shell
|
gpl-2.0
| 454 |
#!/bin/bash
# Example for running
docker run -e HOME=/root -t -i imiell/sd_xorg_libs /bin/bash
|
ianmiell/shutit-distro
|
xorg_libs/bin/run.sh
|
Shell
|
gpl-2.0
| 95 |
#@section user_vars
: ${SFS_SCAN_NAMES="usr"}
: ${SFS_SCAN_EXTENSIONS="sfs squashfs"}
#@section functions
# void liram_scan_squashfs (
# sync_dir,
# *sfs_name=**SFS_SCAN_NAMES=,
# **SFS_SCAN_DIR="",
# **SFS_SCAN_EXTENSIONS,
# **SFS_SYNC_DIR!,
# )
#
liram_scan_squashfs() {
SFS_SYNC_DIR="${1:?}"
shift
local \
FILE_SCAN_EXTENSIONS="${SFS_SCAN_EXTENSIONS-}" \
FILE_SCAN_SYNC_DIR="${SFS_SYNC_DIR}" \
FILE_SCAN_DIR="${SFS_SCAN_DIR-}"
if [ $# -gt 0 ]; then
liram_filescan "$@"
else
liram_filescan ${SFS_SCAN_NAMES-}
fi
}
# @function_alias liram_scan_sfs() renames liram_scan_squashfs()
liram_scan_sfs() { liram_scan_squashfs "$@"; }
# int liram_get_squashfs ( name, sync_dir=**SFS_SYNC_DIR )
#
# Resolves the symlink of a (previously found) squashfs file and stores the
# result in %v0.
#
# Returns 0 if the squashfs file exists, else != 0.
#
liram_get_squashfs() {
liram_filescan_get "${1:?}" "${2-${SFS_SYNC_DIR-}}"
}
# @function_alias liram_get_sfs() renames liram_get_squashfs()
liram_get_sfs() { liram_get_squashfs "$@"; }
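# Usage sketch (assuming a sync dir of /mnt/sync, the default scan names, and
# the %v0 result variable described above):
#   liram_scan_squashfs /mnt/sync
#   liram_get_squashfs usr && echo "resolved: ${v0}"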
|
dywisor/shlib
|
lib/initramfs/newroot/liram/squashfs.sh
|
Shell
|
gpl-2.0
| 1,102 |
#!/usr/bin/env bash
SAVEIFS=$IFS
IFS=$(echo -en "\n\b")
function test_dir() {
local PREV_DIR=`pwd`
cd $1
if test -d ".svn"
then
echo "==========" `pwd` "=========="
svn st
elif test -d ".git"
then
echo "==========" `pwd` "=========="
git status
elif test -d ".bzr"
then
echo "==========" `pwd` "=========="
bzr st
elif test -d ".hg"
then
echo "==========" `pwd` "=========="
hg st
else
for SUB_DIR in $(find . -maxdepth 1 -mindepth 1 -type d)
do
test_dir "$SUB_DIR"
done
fi
cd $PREV_DIR
}
test_dir .
IFS=$SAVEIFS
|
kapojko/scm-utils
|
st-all.bash
|
Shell
|
gpl-2.0
| 672 |
#!/bin/bash
# Installation script for Linux
# We simply link ./lib into /usr/lib using symlinks
# Resolve the directory containing this script
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
sudo ln -s $DIR/lib/linux/libfreeimage-3.17.0.so /usr/lib/libfreeimage.so.3
sudo ln -s $DIR/lib/linux/libglfw.so.3.1 /usr/lib/libglfw.so.3
sudo ln -s $DIR/lib/linux/libglfw.so.3.1 /usr/lib/libglfw.so
sudo ln -s $DIR/lib/linux/libGLEW.so /usr/lib/libGLEW.so
sudo ln -s $DIR/lib/linux/libassimp.so.3.1.1 /usr/lib/libassimp.so.3
sudo ln -s $DIR/lib/linux/libassimp.so.3.1.1 /usr/lib/libassimp.so
sudo ln -s $DIR/lib/linux/libAntTweakBar.so /usr/lib/libAntTweakBar.so.1
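# Refreshing the dynamic linker cache afterwards is usually needed so the new
# libraries are picked up (assuming a standard ldconfig setup):
sudo ldconfig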
|
mobeaudoin/ogl-first-engine
|
install_linux.sh
|
Shell
|
gpl-2.0
| 671 |
#!/bin/bash
# Copyright 2016 Vimal Manohar
# 2016 Yiming Wang
# 2016 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0
# This script demonstrates how to re-segment training data selecting only the
# "good" audio that matches the transcripts.
# The basic idea is to decode with an existing in-domain acoustic model, and a
# biased language model built from the reference, and then work out the
# segmentation from a ctm like file.
# For nnet3 and chain results after cleanup, see the scripts in
# local/nnet3/run_tdnn.sh and local/chain/run_tdnn_6z.sh
# GMM Results for speaker-independent (SI) and speaker adaptive training (SAT) systems on dev and test sets
# [will add these later].
set -e
set -o pipefail
set -u
stage=0
cleanup_stage=0
data=data/train_960
cleanup_affix=cleaned
srcdir=exp/tri6b
nj=100
decode_nj=16
decode_num_threads=4
. ./path.sh
. ./cmd.sh
. ./utils/parse_options.sh
cleaned_data=${data}_${cleanup_affix}
dir=${srcdir}_${cleanup_affix}_work
cleaned_dir=${srcdir}_${cleanup_affix}
if [ $stage -le 1 ]; then
# This does the actual data cleanup.
steps/cleanup/clean_and_segment_data.sh --stage $cleanup_stage --nj $nj --cmd "$train_cmd" \
$data data/lang $srcdir $dir $cleaned_data
fi
if [ $stage -le 2 ]; then
steps/align_fmllr.sh --nj $nj --cmd "$train_cmd" \
$cleaned_data data/lang $srcdir ${srcdir}_ali_${cleanup_affix}
fi
if [ $stage -le 3 ]; then
steps/train_sat.sh --cmd "$train_cmd" \
7000 150000 $cleaned_data data/lang ${srcdir}_ali_${cleanup_affix} ${cleaned_dir}
fi
if [ $stage -le 4 ]; then
# Test with the models trained on cleaned-up data.
utils/mkgraph.sh data/lang_test_tgsmall ${cleaned_dir} ${cleaned_dir}/graph_tgsmall
for dset in test_clean test_other dev_clean dev_other; do
(
steps/decode_fmllr.sh --nj $decode_nj --num-threads $decode_num_threads \
--cmd "$decode_cmd" \
${cleaned_dir}/graph_tgsmall data/${dset} ${cleaned_dir}/decode_${dset}_tgsmall
steps/lmrescore.sh --cmd "$decode_cmd" data/lang_test_{tgsmall,tgmed} \
data/${dset} ${cleaned_dir}/decode_${dset}_{tgsmall,tgmed}
steps/lmrescore_const_arpa.sh \
--cmd "$decode_cmd" data/lang_test_{tgsmall,tglarge} \
data/${dset} ${cleaned_dir}/decode_${dset}_{tgsmall,tglarge}
steps/lmrescore_const_arpa.sh \
--cmd "$decode_cmd" data/lang_test_{tgsmall,fglarge} \
data/${dset} ${cleaned_dir}/decode_${dset}_{tgsmall,fglarge}
) &
done
fi
wait;
exit 0;
|
michellemorales/OpenMM
|
kaldi/egs/librispeech/s5/local/run_cleanup_segmentation.sh
|
Shell
|
gpl-2.0
| 2,492 |
NETCONF="firewall"
PHYS_LIST="a"
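# NOTE: mk_testnet/mk_phys/mk_virt, pass/fail, in_virt, $qping and
# lsctl_in_all_phys are presumably provided by the surrounding lsdn test
# harness; only the scenario itself is defined here.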
function prepare(){
mk_testnet net
mk_phys net a ip 172.16.0.1/24
mk_virt a 1 ip 192.168.99.1/24 mac 00:00:00:00:00:a1
mk_virt a 2 ip 192.168.99.2/24 mac 00:00:00:00:00:a2
mk_virt a 3 ip 192.168.99.3/24 mac 00:00:00:00:00:a3
mk_virt a 4 ip 192.168.99.4/24 mac 00:00:00:00:00:a4
}
function test() {
pass in_virt a 1 $qping 192.168.99.2
# This is a very naive QoS test
# If we limit the bandwidth a lot, then even ping fails ...
fail in_virt a 3 $qping 192.168.99.4
}
function connect(){
lsctl_in_all_phys parts/qos.lsctl
}
|
asch/lsdn
|
test/parts/qos.sh
|
Shell
|
gpl-2.0
| 571 |
#!/usr/bin/env bash
export PORT=8080
fresh
|
adampresley/portfolio
|
start-dev.sh
|
Shell
|
gpl-2.0
| 45 |
#! /bin/sh
#
# This is a kernel build script for Asianux 2's 2.6.9 kernel.
#
die () {
echo $1
exit 1
}
cd /tmp/ || die "Can't chdir to /tmp/ ."
if [ ! -r kernel-2.6.9-89.17.AXS2.src.rpm ]
then
wget http://ftp.miraclelinux.com/pub/Miracle/ia32/standard/4.0/updates/SRPMS/kernel-2.6.9-89.17.AXS2.src.rpm || die "Can't download source package."
fi
rpm --checksig kernel-2.6.9-89.17.AXS2.src.rpm || die "Can't verify signature."
rpm -ivh kernel-2.6.9-89.17.AXS2.src.rpm || die "Can't install source package."
cd /usr/src/asianux/SOURCES/ || die "Can't chdir to /usr/src/asianux/SOURCES/ ."
if [ ! -r ccs-patch-1.7.2-20110121.tar.gz ]
then
wget -O ccs-patch-1.7.2-20110121.tar.gz 'http://sourceforge.jp/frs/redir.php?f=/tomoyo/43375/ccs-patch-1.7.2-20110121.tar.gz' || die "Can't download patch."
fi
cd /tmp/ || die "Can't chdir to /tmp/ ."
cp -p /usr/src/asianux/SPECS/kernel-2.6.spec . || die "Can't copy spec file."
patch << "EOF" || die "Can't patch spec file."
--- kernel-2.6.spec
+++ kernel-2.6.spec
@@ -26,7 +26,7 @@
# that the kernel isn't the stock distribution kernel, for example by
# adding some text to the end of the version number.
#
-%define release 89.17%{?dist}
+%define release 89.17%{?dist}_tomoyo_1.7.2p4
%define sublevel 9
%define kversion 2.6.%{sublevel}
%define rpmversion 2.6.%{sublevel}
@@ -139,6 +139,9 @@
# to versions below the minimum
#
+# TOMOYO Linux
+%define signmodules 0
+
#
# First the general kernel 2.6 required versions as per
# Documentation/Changes
@@ -177,7 +180,7 @@
%define __find_provides /usr/lib/rpm/asianux/find-kmod-provides.sh
%define __find_requires %{nil}
-Name: kernel
+Name: ccs-kernel
Group: System Environment/Kernel
License: GPLv2
Version: %{rpmversion}
@@ -6316,6 +6319,10 @@
# END OF PATCH APPLICATIONS
+# TOMOYO Linux
+tar -zxf %_sourcedir/ccs-patch-1.7.2-20110121.tar.gz
+patch -sp1 < patches/ccs-patch-2.6.9-asianux-2.diff
+
cp %{SOURCE10} Documentation/
mkdir configs
@@ -6327,6 +6334,9 @@
for i in *.config
do
mv $i .config
+ # TOMOYO Linux
+ cat config.ccs >> .config
+ sed -i -e "s/^CONFIG_DEBUG_INFO=.*/# CONFIG_DEBUG_INFO is not set/" -- .config
make ARCH=`echo $i | cut -d"-" -f3 | cut -d"." -f1 | sed -e s/i.86/i386/ -e s/s390x/s390/ -e s/ppc64.series/ppc64/ ` nonint_oldconfig > /dev/null
cp .config configs/$i
done
EOF
mv kernel-2.6.spec ccs-kernel.spec || die "Can't rename spec file."
echo ""
echo ""
echo ""
echo "Edit /tmp/ccs-kernel.spec if needed, and run"
echo "rpmbuild -bb /tmp/ccs-kernel.spec"
echo "to build kernel rpm packages."
echo ""
echo "I'll start 'rpmbuild -bb --target i686 /tmp/ccs-kernel.spec' in 30 seconds. Press Ctrl-C to stop."
sleep 30
patch << "EOF" || die "Can't patch spec file."
--- /tmp/ccs-kernel.spec
+++ /tmp/ccs-kernel.spec
@@ -3,14 +3,14 @@
# What parts do we want to build? We must build at least one kernel.
# These are the kernels that are built IF the architecture allows it.
-%define buildup 1
+%define buildup 0
%define buildsmp 1
-%define buildsource 1
-%define buildhugemem 1
-%define buildlargesmp 1
+%define buildsource 0
+%define buildhugemem 0
+%define buildlargesmp 0
%define builddoc 0
-%define buildxen 1
-%define kabi 1
+%define buildxen 0
+%define kabi 0
%define FC2 0
%define FC3 0
EOF
exec rpmbuild -bb --target i686 /tmp/ccs-kernel.spec
exit 0
|
renaudallard/kernel-froyo-GT-I9000
|
specs/build-ax2-2.6.9.sh
|
Shell
|
gpl-2.0
| 3,344 |
case $0 in */*)D="${0%/*}";;*)D=.;;esac
. $D/../libsh/searchpath.sh
. $D/../libsh/assert.sh
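# Semantics exercised below (as inferred from the assertions):
# searchpath_find walks a colon-separated path and prints the first entry
# whose joined path satisfies the given test command; searchpath_contains,
# *_append(_force), *_prepend(_force) and *_remove* manipulate the named
# path variable without introducing duplicates.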
X=/bin:/usr/bin:/usr/local/bin
assert [ "$(searchpath_find $X sh test -e)" = /bin/sh ]
assert [ "$(searchpath_find $X sh)" = /bin/sh ]
assert ! searchpath_find $X sh test -w
assert ! searchpath_find $X nonexistentexecutable test -e
X=a:b:c
assert searchpath_contains X a
assert searchpath_contains X b
assert searchpath_contains X c
assert searchpath_contains X x y a z
assert ! searchpath_contains X d
unset X
searchpath_append X a
assert [ $X = a ]
X=a:b:c
searchpath_append X d
assert [ $X = a:b:c:d ]
X=a:b:c:d
searchpath_append X d
assert [ $X = a:b:c:d ]
X=a:b:c:d
searchpath_append X b
assert [ $X = a:b:c:d ]
X=a:b:c:d
searchpath_append_force X b
assert [ $X = a:c:d:b ]
X=a:b:c
searchpath_append X b e f c d
assert [ $X = a:b:c:e:f:d ]
X=a:b:c
searchpath_append_force X b e f c d
assert [ $X = a:b:e:f:c:d ]
X=a:b:c
searchpath_prepend X d
assert [ $X = d:a:b:c ]
X=a:b:c:d
searchpath_prepend X a
assert [ $X = a:b:c:d ]
X=a:b:c:d
searchpath_prepend X b
assert [ $X = a:b:c:d ]
X=a:b:c:d
searchpath_prepend_force X b
assert [ $X = b:a:c:d ]
X=a:b:c
searchpath_prepend X b e f c d
assert [ $X = e:f:d:a:b:c ]
X=a:b:c
searchpath_prepend_force X b e f c d
assert [ $X = b:e:f:c:d:a ]
X=a:b:a:d:c
searchpath_remove X a c
assert [ $X = b:d ]
X=abc:abd:b1:aef:aeg:ak:b2
searchpath_remove_prefixed X a
assert [ $X = b1:b2 ]
X=abc:abd:b1:aef:aeg:ak:b2
searchpath_remove_prefixed X ab
assert [ $X = b1:aef:aeg:ak:b2 ]
X=abc:abd:b1:aef:aeg:ak:b2
searchpath_remove_prefixed X ab ae
assert [ $X = b1:ak:b2 ]
X=abc:abd:b1:aef:aeg:ak:b2
searchpath_remove_prefixed X a b
assert [ "$X" = '' ]
|
quixotique/shellboost
|
test/test_searchpath.sh
|
Shell
|
gpl-2.0
| 1,697 |
#!/bin/sh
pid=trigger_owner
pidpw=trgown123
ORACLE_SID=trgprd
ORACLE_HOME=/u01/app/oracle/product/10.2.0
export ORACLE_SID ORACLE_HOME pid pidpw
PATH=$PATH:$ORACLE_HOME/bin
export PATH
cd /home/isoladm/app/log
echo >> fun2_dailybal.log
echo "START OF DAILY BALANCE CHECK @ `date`" >> fun2_dailybal.log
sqlplus -s ${pid}/${pidpw}@${ORACLE_SID} << EOF >> fun2_dailybal.log
exec SP_PROCESS_FOR_DAILY_BAL_CHK;
exit
EOF
echo >> fun2_dailybal.log
echo "END OF DAILY BALANCE CHECK @ `date`" >> fun2_dailybal.log
echo >> fun2_dailybal.log
echo >> fun2_dailybal.log
|
zand3rs/fun2
|
scripts/fun2_dailybal_chk.sh
|
Shell
|
gpl-2.0
| 565 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2013-2020 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Try tup graph --combine
. ./tup.sh
cat > ok.sh << HERE
cat in1.txt in2.txt
touch out1.txt out2.txt out3.txt
HERE
cat > Tupfile << HERE
: |> sh ok.sh |> out1.txt out2.txt out3.txt
HERE
tup touch in1.txt in2.txt Tupfile
update
tup graph . --combine > ok.dot
# The 3 input files counted are in1.txt, in2.txt and ok.sh
gitignore_good 'node.*in.*3 files' ok.dot
gitignore_good 'node.*out.*3 files' ok.dot
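# (gitignore_good is a helper sourced from ./tup.sh; here it presumably
# greps ok.dot to assert that --combine collapsed the three inputs and the
# three outputs into single "3 files" nodes.)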
eotup
|
ppannuto/tup
|
test/t4133-graph-combine.sh
|
Shell
|
gpl-2.0
| 1,141 |
#! /bin/bash
# configure python environment
sudo yum install python27-devel -y
mv /usr/bin/python /usr/bin/python266
ln -s /usr/bin/python2.7 /usr/bin/python
sudo curl -o ez_setup.py https://bootstrap.pypa.io/ez_setup.py
sudo python ez_setup.py
sudo /usr/bin/easy_install-2.7 pip
sudo pip install virtualenv
cd $HOME
umount /data
echo "using drive " $1
echo "WARNING!! This will format the drive at" $1
read -rsp $'Press any key to continue or control-C to quit...\n' -n1 key
#make a new ext4 filesystem
mkfs.ext4 $1
#mount the new filesystem under /data
mount -t ext4 $1 /data
chmod a+rwx /data
#format the hadoop namenode
sudo -u hdfs hdfs namenode -format
#start hdfs
for x in `cd /etc/init.d ; ls hadoop-hdfs-*` ; do sudo service $x restart ; done
#make the hadoop directories
/usr/lib/hadoop/libexec/init-hdfs.sh
sudo -u hdfs hdfs dfs -mkdir /user/w205
sudo -u hdfs hdfs dfs -chown w205 /user/w205
#start YARN services
service hadoop-yarn-resourcemanager restart
service hadoop-yarn-nodemanager restart
service hadoop-mapreduce-historyserver restart
#set up directories for postgres
mkdir /data/pgsql
mkdir /data/pgsql/data
mkdir /data/pgsql/logs
chown -R postgres /data/pgsql
sudo -u postgres initdb -D /data/pgsql/data
#setup pg_hba.conf
sudo -u postgres echo "host all all 0.0.0.0 0.0.0.0 md5" >> /data/pgsql/data/pg_hba.conf
#setup postgresql.conf
sudo -u postgres echo "listen_addresses = '*'" >> /data/pgsql/data/postgresql.conf
sudo -u postgres echo "standard_conforming_strings = off" >> /data/pgsql/data/postgresql.conf
#make start postgres file
cd /data
cat > /data/start_postgres.sh <<EOF
#! /bin/bash
sudo -u postgres pg_ctl -D /data/pgsql/data -l /data/pgsql/logs/pgsql.log start
EOF
chmod +x /data/start_postgres.sh
#make a stop postgres file
cat > /data/stop_postgres.sh <<EOF
#! /bin/bash
sudo -u postgres pg_ctl -D /data/pgsql/data -l /data/pgsql/logs/pgsql.log stop
EOF
chmod +x /data/stop_postgres.sh
#start postgres
/data/start_postgres.sh
sleep 5
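# Optional sanity check (illustrative, not part of the original flow):
#   sudo -u postgres pg_isready -p 5432   # should report "accepting connections"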
# create database for storage
cat > /data/setup_postgres_database.sql <<EOF
DROP DATABASE IF EXISTS solid_start;
CREATE DATABASE solid_start;
\connect solid_start;
CREATE SCHEMA raw;
CREATE SCHEMA src;
\q
EOF
sudo -u postgres psql -f /data/setup_postgres_database.sql
#write setup script for hive metastore
cat > /data/setup_hive_for_postgres.sql <<EOF
CREATE USER hiveuser WITH PASSWORD 'hive';
CREATE DATABASE metastore;
\c metastore
\i /usr/lib/hive/scripts/metastore/upgrade/postgres/hive-schema-1.1.0.postgres.sql
\i /usr/lib/hive/scripts/metastore/upgrade/postgres/hive-txn-schema-0.13.0.postgres.sql
\c metastore
\pset tuples_only on
\o /tmp/grant-privs
SELECT 'GRANT SELECT,INSERT,UPDATE,DELETE ON "' || schemaname || '". "' ||tablename ||'" TO hiveuser ;'
FROM pg_tables
WHERE tableowner = CURRENT_USER and schemaname = 'public';
\o
\pset tuples_only off
\i /tmp/grant-privs
\q
EOF
#run the metastore creation sql
sudo -u postgres psql -f /data/setup_hive_for_postgres.sql
#make the new hive configuration directory
sudo -u hadoop mkdir -p /data/hadoop/hive/conf
#setup the hive-site file
cat > /data/hadoop/hive/conf/hive-site.xml <<EOF
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- Hive Configuration can either be stored in this file or in the hadoop configuration files -->
<!-- that are implied by Hadoop setup variables. -->
<!-- Aside from Hadoop setup variables - this file is provided as a convenience so that Hive -->
<!-- users do not have to edit hadoop configuration files (that may be managed as a centralized -->
<!-- resource). -->
<!-- Hive Execution Parameters -->
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:postgresql://localhost:5432/metastore</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>org.postgresql.Driver</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>hiveuser</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>hive</value>
</property>
<property>
<name>datanucleus.autoCreateSchema</name>
<value>false</value>
</property>
<!-- <property>
<name>hive.metastore.uris</name>
<value>thrift://localhost:9083</value>
<description>IP address (or fully-qualified domain name) and port of the metastore host</description>
</property>
-->
<property>
<name>hive.metastore.schema.verification</name>
<value>true</value>
</property>
</configuration>
EOF
#setup zeppelin
cat > setup_zeppelin.sh <<EOF
mkdir /data/w205
chown w205 /data/w205
sudo -u w205 wget -O /data/apache-maven-3.3.3-bin.tar.gz http://www.trieuvan.com/apache/maven/maven-3/3.3.3/binaries/apache-maven-3.3.3-bin.tar.gz
cd /data/ && sudo -u w205 tar xvzf /data/apache-maven-3.3.3-bin.tar.gz
sudo -u w205 git clone https://github.com/apache/incubator-zeppelin.git /data/zeppelin
cd /data/zeppelin
/data/apache-maven-3.3.3/bin/mvn -Pspark-1.5 -Dhadoop.version=2.6.0 -DskipTests -Phadoop-2.6 clean package
cp conf/zeppelin-env.sh.template conf/zeppelin-env.sh
cp /etc/hadoop/conf/*.xml conf/
cp /data/hadoop/hive/conf/hive-site.xml conf/
echo 'export ZEPPELIN_MEM="-Xmx2048m"' >> conf/zeppelin-env.sh
echo 'export SPARK_HOME=/home/w205/spark15' >> conf/zeppelin-env.sh
EOF
chmod +x setup_zeppelin.sh
#setup glassdoor and zillow etl applications
cp -r store/api/glassdoor/ /data/glassdoor
cp -r store/api/zillow/ /data/zillow
# create shell script to run glassdoor etl script as
# a daily cronjob
cat > /data/glassdoor.sh <<EOF
cd /data/glassdoor
source venv/bin/activate
pip install -r requirements.txt
/usr/bin/python glassdoorEtl/etl.py
deactivate
EOF
sudo chmod a+x /data/glassdoor.sh
# create shell script to run zillow etl script as
# a daily cronjob
cat > /data/zillow.sh <<EOF
cd /data/zillow
source venv/bin/activate
pip install -r requirements.txt
/usr/bin/python zillowEtl/etl.py
deactivate
EOF
sudo chmod a+x /data/zillow.sh
# copy analysis table scripts to /data/analysis
mkdir -p /data/analysis
cp -r analyze/sql /data/analysis
cp analyze/analysis.sh /data
# create cronjob
sudo echo " 0 4 * * * /data/glassdoor.sh" >> /etc/crontab
sudo echo " 0 0 1 * * /data/zillow.sh" >> /etc/crontab
sudo echo " 0 0 1 * * /data/analysis.sh" >> /etc/crontab
|
pjryan126/solid-start-careers
|
init.sh
|
Shell
|
gpl-2.0
| 7,259 |
#!/usr/bin/env bash
OS=mac
PROJECT=zlib
VERSION="1.2.11"
DL_URL="https://zlib.net/zlib-$VERSION.tar.xz"
SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
MAIN_DIR="$SCRIPT_DIR/.."
TARGET_DIR="$MAIN_DIR/libs/$PROJECT"
PROJECT_DIR="$MAIN_DIR/tmp/$PROJECT"
DOWNLOAD="$PROJECT_DIR/$PROJECT-$VERSION.tar.xz"
SRC_DIR="$PROJECT_DIR/src"
BUILD_DIR="$SRC_DIR/$PROJECT-$VERSION"
function indent {
sed 's/^/ /'
}
function doPrepare {
if [ -d "$SRC_DIR" ]; then
rm -rf "$SRC_DIR"
fi
if [ -d "$TARGET_DIR" ]; then
rm -rf "$TARGET_DIR"
fi
mkdir -p "$PROJECT_DIR"
mkdir -p "$TARGET_DIR"
mkdir -p "$SRC_DIR"
}
function doDownload {
if [ ! -f "$DOWNLOAD" ]; then
curl "$DL_URL" -o "$DOWNLOAD"
fi
}
function doUnzip {
tar xJf "$DOWNLOAD" -C "$SRC_DIR"
}
function doConfigure {
./configure --help > "$PROJECT_DIR/options.txt"
./configure \
--prefix="$BUILD_DIR" \
--static \
--64
}
function doBuild {
cd "$SRC_DIR/$PROJECT-$VERSION"
export CC="clang"
export CXX="clang++"
export LDFLAGS="-L/opt/local/lib -lc++"
export CPPFLAGS="-I/opt/local/include"
export CXXFLAGS="-std=c++11 -stdlib=libc++ -mmacosx-version-min=10.7"
export CFLAGS="-I/opt/local/include -mmacosx-version-min=10.7"
# debug
(export CXXFLAGS="$CXXFLAGS -g -O0"; \
export CFLAGS="$CFLAGS -g -O0"; \
doConfigure)
make -j8 install prefix="$BUILD_DIR/debug"
# release
(export CXXFLAGS="$CXXFLAGS -msse2 -Ofast -finline -ffast-math -funsafe-math-optimizations"; \
export CFLAGS="$CFLAGS -msse2 -Ofast -finline -ffast-math -funsafe-math-optimizations"; \
doConfigure)
make -j8 install prefix="$BUILD_DIR/release"
}
function doCopy {
mkdir -p "$TARGET_DIR/bin/$OS/debug"
mkdir -p "$TARGET_DIR/bin/$OS/release"
mkdir -p "$TARGET_DIR/include"
cp -r "$BUILD_DIR/debug/lib/libz.a" "$TARGET_DIR/bin/$OS/debug/libz.a"
cp -r "$BUILD_DIR/release/lib/libz.a" "$TARGET_DIR/bin/$OS/release/libz.a"
cp -r "$BUILD_DIR/release/include"/* "$TARGET_DIR/include"
}
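# Optional sanity check (illustrative, not part of the original script):
#   file "$TARGET_DIR/bin/$OS/release/libz.a"   # expect "current ar archive"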
echo "Prepare"
doPrepare | indent
echo "Download"
doDownload | indent
echo "Unzip"
doUnzip | indent
echo "Build"
doBuild 2>&1 | indent
echo "Copy"
doCopy | indent
|
elsamuko/quat
|
scripts/build_zlib_mac.sh
|
Shell
|
gpl-2.0
| 2,290 |
#!/system/bin/sh
# Boeffla-Config controller interface
#
# *************************************
# SM-G900F Samsung Android 5.0 version
#
# V0.1
# *************************************
# ********************************
# Kernel specific initialisation
# ********************************
# kernel specification (hardware; type; target; url)
KERNEL_SPECS="g900f;samsung;lp50;http://boeffla.df-kunde.de/sgs5/boeffla-kernel/"
# kernel features
# (1=enable-busybox,2=enable-frandom,3=wipe-cache,4=disable-zram-control)
# (5=enable-default-zram-control,6=enable-selinux-switch, 7=enable-selinux-control)
KERNEL_FEATURES="-1-3-6-7-"
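# e.g. "-1-3-6-7-" above = busybox enabled (1), wipe-cache (3),
# selinux switch (6) and selinux control (7), per the legend above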
# path to kernel libraries
LIBPATH="/lib/modules"
# block devices
SYSTEM_DEVICE="/dev/block/platform/msm_sdcc.1/by-name/system"
CACHE_DEVICE="/dev/block/platform/msm_sdcc.1/by-name/cache"
DATA_DEVICE="/dev/block/platform/msm_sdcc.1/by-name/userdata"
BOOT_DEVICE="/dev/block/platform/msm_sdcc.1/by-name/boot"
RADIO_DEVICE="/dev/block/platform/msm_sdcc.1/by-name/modem"
RECOVERY_DEVICE="/dev/block/platform/msm_sdcc.1/by-name/recovery"
# *******************
# List of values
# *******************
if [ "lov_gov_profiles" == "$1" ]; then
echo "interactive - battery;interactive - battery extreme;interactive - performance;zzmoove - optimal;zzmoove - battery;zzmoove - battery plus;zzmoove - battery yank;zzmoove - battery extreme yank;zzmoove - performance;zzmoove - insane;zzmoove - moderate;zzmoove - game;zzmoove - relax"
exit 0
fi
if [ "lov_cpu_hotplug_profiles" == "$1" ]; then
echo "Default;Optimized;1 core max;2 cores max;3 cores max;2 cores min;3 cores min;4 cores min;2 cores exact;3 cores exact;zzmoove native default;zzmoove native 1 core max;zzmoove native 2 cores max;zzmoove native 3 cores max;zzmoove native 2 cores min;zzmoove native 3 cores min;zzmoove native 4 cores min"
exit 0
fi
if [ "lov_cpu_volt_profiles" == "$1" ]; then
echo "No undervolting;undervolt -25mV;undervolt -50mV;undervolt -75mV;undervolt -100mV;undervolt light;undervolt medium;undervolt heavy"
exit 0
fi
if [ "lov_gpu_freq_profiles" == "$1" ]; then
echo "54 only;160 only;160/266;266/350;54/108/160/200/266;108/160/200/266/350;160/266/350/440/533 (default);266/350/440/533/600;350/440/533/600/640;440/533/600/640/700"
exit 0
fi
if [ "lov_gpu_volt_profiles" == "$1" ]; then
echo "No undervolting;undervolt -25mV;undervolt -50mV;undervolt -75mV;undervolt -100mV;undervolt light;undervolt medium;undervolt heavy;overvolt +25mV;overvolt +50mV;overvolt +75mV;overvolt +100mV"
exit 0
fi
if [ "lov_gpu_freq" == "$1" ]; then
echo "54;108;160;200;266;300;350;400;440;500;533;600;640;700"
exit 0
fi
if [ "lov_eq_gain_profiles" == "$1" ]; then
echo "Flat;Archis audiophile;Baseland;Bass extreme;Bass treble;Classic;Dance;Eargasm;Googy;Metal/Rock;Pleasant;Treble"
exit 0
fi
if [ "lov_system_tweaks" == "$1" ]; then
echo "Off;Boeffla tweaks;Speedmod tweaks;Mattiadj tweaks"
exit 0
fi
if [ "lov_modules" == "$1" ]; then
ls $LIBPATH/*
exit 0
fi
if [ "lov_presets" == "$1" ]; then
# Note, the ^ sign will be translated into newline for this setting
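# Example (as the echoes below suggest): "Power extreme~Gov: ...^Sched: row"
# is rendered as a preset titled "Power extreme" whose detail text is split
# into a new line at each ^.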
echo "Power extreme~"
echo "Gov: zzmoove / performance"
echo "^Sched: row"
echo "^CPU: 2841 / no uv"
echo "^GPU: 389-578;"
echo "Power~"
echo "Gov: ondemand / standard"
echo "^Sched: row"
echo "^CPU: 2611 / no uv"
echo "^GPU: 320-578;"
echo "Standard~"
echo "Gov: interactive / standard"
echo "^Sched: row"
echo "^CPU: 2457 / no uv"
echo "^GPU: 200-578;"
echo "Battery friendly~"
echo "Gov: interactive / standard"
echo "^Sched: zen"
echo "^CPU: 1728 / -25mV"
echo "^GPU: 27-320;"
echo "Battery saving~"
echo "Gov: zzmoove / battery yank"
echo "^Sched: zen"
echo "^CPU: 1497 / light uv"
echo "^GPU: 27 only;"
exit 0
fi
# ************************************
# Configuration values (for profiles)
# ************************************
if [ "conf_presets" == "$1" ]; then
if [ "Power extreme" == "$2" ]; then
# gov, gov prof, sched int, sched ext, cpu max, cpu uv, gpu freq, gpu uv
echo "zzmoove;zzmoove - performance;"
echo "row;row;"
echo "2841600;None;"
echo "2,0;None"
fi
if [ "Power" == "$2" ]; then
# gov, gov prof, sched int, sched ext, cpu max, cpu uv, gpu freq, gpu uv
echo "ondemand;ondemand - standard;"
echo "row;row;"
echo "2611200;None;"
echo "3,0;None"
fi
if [ "Standard" == "$2" ]; then
# gov, gov prof, sched int, sched ext, cpu max, cpu uv, gpu freq, gpu uv
echo "interactive;standard;"
echo "row;row;"
echo "2457600;None;"
echo "5,0;None"
fi
if [ "Battery friendly" == "$2" ]; then
# gov, gov prof, sched int, sched ext, cpu max, cpu uv, gpu freq, gpu uv
echo "interactive;standard;"
echo "zen;zen;"
echo "1728000;undervolt -25mV;"
echo "6,3;None"
fi
if [ "Battery saving" == "$2" ]; then
# gov, gov prof, sched int, sched ext, cpu max, cpu uv, gpu freq, gpu uv
echo "zzmoove;zzmoove - battery yank;"
echo "zen;zen;"
echo "1497600;undervolt light;"
echo "6,6;None"
fi
exit 0
fi
if [ "conf_gpu_freq" == "$1" ]; then
if [ "54 only" == "$2" ]; then
echo "54;54;54;54;54"
fi
if [ "160 only" == "$2" ]; then
echo "160;160;160;160;160"
fi
if [ "160/266" == "$2" ]; then
echo "160;160;160;266;266"
fi
if [ "266/350" == "$2" ]; then
echo "266;266;266;350;350"
fi
if [ "54/108/160/200/266" == "$2" ]; then
echo "54;108;160;200;266"
fi
if [ "108/160/200/266/350" == "$2" ]; then
echo "108;160;200;266;350"
fi
if [ "160/266/350/440/533 (default)" == "$2" ]; then
echo "160;266;350;440;533"
fi
if [ "266/350/440/533/600" == "$2" ]; then
echo "266;350;440;533;600"
fi
if [ "350/440/533/600/640" == "$2" ]; then
echo "350;440;533;600;640"
fi
if [ "440/533/600/640/700" == "$2" ]; then
echo "440;533;600;640;700"
fi
exit 0
fi
if [ "conf_gpu_volt" == "$1" ]; then
if [ "No undervolting" == "$2" ]; then
echo "0;0;0;0;0"
fi
if [ "undervolt -25mV" == "$2" ]; then
echo "-25000;-25000;-25000;-25000;-25000"
fi
if [ "undervolt -50mV" == "$2" ]; then
echo "-50000;-50000;-50000;-50000;-50000"
fi
if [ "undervolt -75mV" == "$2" ]; then
echo "-75000;-75000;-75000;-75000;-75000"
fi
if [ "undervolt -100mV" == "$2" ]; then
echo "-100000;-100000;-100000;-100000;-100000"
fi
if [ "undervolt light" == "$2" ]; then
echo "-25000;-25000;-25000;-50000;-50000"
fi
if [ "undervolt medium" == "$2" ]; then
echo "-50000;-50000;-50000;-75000;-75000"
fi
if [ "undervolt heavy" == "$2" ]; then
echo "-75000;-75000;-75000;-100000;-100000"
fi
if [ "overvolt +25mV" == "$2" ]; then
echo "25000;25000;25000;25000;25000"
fi
if [ "overvolt +50mV" == "$2" ]; then
echo "50000;50000;50000;50000;50000"
fi
if [ "overvolt +75mV" == "$2" ]; then
echo "75000;75000;75000;75000;75000"
fi
if [ "overvolt +100mV" == "$2" ]; then
echo "100000;100000;100000;100000;100000"
fi
exit 0
fi
if [ "conf_cpu_volt" == "$1" ]; then
if [ "No undervolting" == "$2" ]; then
echo "0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0"
fi
if [ "undervolt -25mV" == "$2" ]; then
echo "-25;-25;-25;-25;-25;-25;-25;-25;-25;-25;-25;-25;-25;-25;-25;-25;-25;-25;-25"
fi
if [ "undervolt -50mV" == "$2" ]; then
echo "-50;-50;-50;-50;-50;-50;-50;-50;-50;-50;-50;-50;-50;-50;-50;-50;-50;-50;-50"
fi
if [ "undervolt -75mV" == "$2" ]; then
echo "-75;-75;-75;-75;-75;-75;-75;-75;-75;-75;-75;-75;-75;-75;-75;-75;-75;-75;-75"
fi
if [ "undervolt -100mV" == "$2" ]; then
echo "-100;-100;-100;-100;-100;-100;-100;-100;-100;-100;-100;-100;-100;-100;-100;-100;-100;-100;-100"
fi
if [ "undervolt light" == "$2" ]; then
echo "-50;-50;-50;-50;-50;-25;-25;-25;-25;-25;-25;-25;0;0;0;0;0;0;0"
fi
if [ "undervolt medium" == "$2" ]; then
echo "-75;-75;-75;-75;-75;-75;-50;-50;-50;-50;-50;-25;-25;-25;-25;-25;-25;-25;-25"
fi
if [ "undervolt heavy" == "$2" ]; then
echo "-100;-100;-100;-100;-100;-100;-100;-75;-75;-75;-75;-75;-50;-50;-50;-50;-50;-50;-50"
fi
exit 0
fi
if [ "conf_eq_gains" == "$1" ]; then
if [ "Flat" == "$2" ]; then
echo "0;0;0;0;0"
fi
if [ "Archis audiophile" == "$2" ]; then
echo "8;4;4;2;6"
fi
if [ "Eargasm" == "$2" ]; then
echo "12;8;4;2;3"
fi
if [ "Pleasant" == "$2" ]; then
echo "4;3;2;2;3"
fi
if [ "Classic" == "$2" ]; then
echo "0;0;0;-3;-5"
fi
if [ "Bass treble" == "$2" ]; then
echo "10;7;0;2;5"
fi
if [ "Bass extreme" == "$2" ]; then
echo "12;8;3;-1;1"
fi
if [ "Treble" == "$2" ]; then
echo "-5;1;0;4;3"
fi
if [ "Baseland" == "$2" ]; then
echo "8;7;4;3;3"
fi
if [ "Dance" == "$2" ]; then
echo "4;0;-6;0;3"
fi
if [ "Metal/Rock" == "$2" ]; then
echo "4;3;0;-4;3"
fi
if [ "Googy" == "$2" ]; then
echo "10;2;-1;2;10"
fi
exit 0
fi
# *******************
# Parameters
# *******************
if [ "param_readahead" == "$1" ]; then
# Internal sd (min/max/steps)
echo "128;3072;128;"
# External sd (min/max/steps)
echo "128;3072;128"
exit 0
fi
if [ "param_boeffla_sound" == "$1" ]; then
# Headphone min/max, Speaker min/max
echo "-30;30;-30;30;"
# Equalizer min/max
echo "-12;12;"
# Microphone gain min/max
echo "-30;30;"
# Stereo expansion min/max
echo "0;31"
exit 0
fi
if [ "param_cpu_uv" == "$1" ]; then
# CPU UV min/max/steps
echo "600;1500;25"
exit 0
fi
if [ "param_gpu_uv" == "$1" ]; then
# GPU UV min/max/steps
echo "500000;1200000;25000"
exit 0
fi
if [ "param_led" == "$1" ]; then
# LED speed min/max/steps
echo "0;15;1;"
# LED brightness min/max/steps
echo "0;255;5"
exit 0
fi
if [ "param_touchwake" == "$1" ]; then
# Touchwake min/max/steps
echo "0;600000;5000;"
# Knockon min/max/steps
echo "100;2000;100"
exit 0
fi
if [ "param_early_suspend_delay" == "$1" ]; then
# Early suspend delay min/max/steps
echo "0;700;25"
exit 0
fi
if [ "param_zram" == "$1" ]; then
# zRam size min/max/steps
echo "104857600;1572864000;20971520;"
# swappiness max value
echo "200"
exit 0
fi
if [ "param_charge_rates" == "$1" ]; then
# AC charge min/max/steps
echo "0;2000;50;"
# USB charge min/max/steps
echo "0;1600;50;"
# Wireless charge min/max/steps
echo "100;1600;25"
exit 0
fi
if [ "param_lmk" == "$1" ]; then
# LMK size min/max/steps
echo "5;300;1"
exit 0
fi
# *******************
# Get settings
# *******************
if [ "get_ums" == "$1" ]; then
# if [ "`busybox grep 179 /sys/devices/platform/s3c-usbgadget/gadget/lun0/file`" ]; then
# echo "1"
# else
# echo "0"
# fi
echo ""
exit 0
fi
if [ "get_tunables" == "$1" ]; then
if [ -d /sys/devices/system/cpu/cpufreq/$2 ]; then
cd /sys/devices/system/cpu/cpufreq/$2
for file in *
do
content="`busybox cat $file`"
busybox echo -ne "$file~$content;"
done
fi
exit 0
fi
if [ "get_kernel_version2" == "$1" ]; then
busybox cat /proc/version
exit 0
fi
if [ "get_kernel_specs" == "$1" ]; then
echo $KERNEL_SPECS
exit 0
fi
if [ "get_kernel_features" == "$1" ]; then
echo $KERNEL_FEATURES
exit 0
fi
# *******************
# Applying settings
# *******************
if [ "apply_cpu_hotplug_profile" == "$1" ]; then
if [ `busybox cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor | busybox grep zzmoove` ]; then
echo "0" > /sys/devices/system/cpu/cpufreq/zzmoove/hotplug_max_limit
echo "0" > /sys/devices/system/cpu/cpufreq/zzmoove/hotplug_min_limit
echo "2" > /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug
echo "2" > /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug_sleep
fi
if [ "Default" == "$2" ]; then
echo "0" >/sys/devices/system/cpu/cpu0/online_control
echo "0" >/sys/devices/system/cpu/cpu1/online_control
echo "0" >/sys/devices/system/cpu/cpu2/online_control
echo "0" >/sys/devices/system/cpu/cpu3/online_control
exit 0
fi
if [ "1 core max" == "$2" ]; then
echo "0" >/sys/devices/system/cpu/cpu0/online_control
echo "2" >/sys/devices/system/cpu/cpu1/online_control
echo "2" >/sys/devices/system/cpu/cpu2/online_control
echo "2" >/sys/devices/system/cpu/cpu3/online_control
exit 0
fi
if [ "2 cores max" == "$2" ]; then
echo "0" >/sys/devices/system/cpu/cpu0/online_control
echo "0" >/sys/devices/system/cpu/cpu1/online_control
echo "2" >/sys/devices/system/cpu/cpu2/online_control
echo "2" >/sys/devices/system/cpu/cpu3/online_control
exit 0
fi
if [ "3 cores max" == "$2" ]; then
echo "0" >/sys/devices/system/cpu/cpu0/online_control
echo "0" >/sys/devices/system/cpu/cpu1/online_control
echo "0" >/sys/devices/system/cpu/cpu2/online_control
echo "2" >/sys/devices/system/cpu/cpu3/online_control
exit 0
fi
if [ "2 cores min" == "$2" ]; then
echo "1" >/sys/devices/system/cpu/cpu0/online_control
echo "0" >/sys/devices/system/cpu/cpu1/online_control
echo "0" >/sys/devices/system/cpu/cpu2/online_control
echo "1" >/sys/devices/system/cpu/cpu3/online_control
exit 0
fi
if [ "3 cores min" == "$2" ]; then
echo "1" >/sys/devices/system/cpu/cpu0/online_control
echo "0" >/sys/devices/system/cpu/cpu1/online_control
echo "1" >/sys/devices/system/cpu/cpu2/online_control
echo "1" >/sys/devices/system/cpu/cpu3/online_control
exit 0
fi
if [ "4 cores min" == "$2" ]; then
echo "1" >/sys/devices/system/cpu/cpu0/online_control
echo "1" >/sys/devices/system/cpu/cpu1/online_control
echo "1" >/sys/devices/system/cpu/cpu2/online_control
echo "1" >/sys/devices/system/cpu/cpu3/online_control
exit 0
fi
if [ "Optimized" == "$2" ]; then
echo "0" >/sys/devices/system/cpu/cpu0/online_control
echo "0" >/sys/devices/system/cpu/cpu1/online_control
echo "0" >/sys/devices/system/cpu/cpu2/online_control
echo "3" >/sys/devices/system/cpu/cpu3/online_control
exit 0
fi
if [ "2 cores exact" == "$2" ]; then
echo "1" >/sys/devices/system/cpu/cpu0/online_control
echo "2" >/sys/devices/system/cpu/cpu1/online_control
echo "2" >/sys/devices/system/cpu/cpu2/online_control
echo "1" >/sys/devices/system/cpu/cpu3/online_control
exit 0
fi
if [ "3 cores exact" == "$2" ]; then
echo "1" >/sys/devices/system/cpu/cpu0/online_control
echo "1" >/sys/devices/system/cpu/cpu1/online_control
echo "2" >/sys/devices/system/cpu/cpu2/online_control
echo "1" >/sys/devices/system/cpu/cpu3/online_control
exit 0
fi
if [ "zzmoove native default" == "$2" ]; then
echo "1" >/sys/devices/system/cpu/cpu0/online_control
echo "1" >/sys/devices/system/cpu/cpu1/online_control
echo "1" >/sys/devices/system/cpu/cpu2/online_control
echo "1" >/sys/devices/system/cpu/cpu3/online_control
if [ `busybox cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor | busybox grep zzmoove` ]; then
echo "0" > /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug
echo "0" > /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug_sleep
if [ `busybox cat /sys/devices/system/cpu/cpufreq/zzmoove/profile_number | busybox grep 11` ]; then
echo "2" > /sys/devices/system/cpu/cpufreq/zzmoove/hotplug_min_limit
fi
else
echo "0" >/sys/devices/system/cpu/cpu0/online_control
echo "0" >/sys/devices/system/cpu/cpu1/online_control
echo "0" >/sys/devices/system/cpu/cpu2/online_control
echo "0" >/sys/devices/system/cpu/cpu3/online_control
fi
exit 0
fi
if [ "zzmoove native 1 core max" == "$2" ]; then
echo "1" >/sys/devices/system/cpu/cpu0/online_control
echo "1" >/sys/devices/system/cpu/cpu1/online_control
echo "1" >/sys/devices/system/cpu/cpu2/online_control
echo "1" >/sys/devices/system/cpu/cpu3/online_control
if [ `busybox cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor | busybox grep zzmoove` ]; then
echo "0" > /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug
echo "0" > /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug_sleep
echo "1" > /sys/devices/system/cpu/cpufreq/zzmoove/hotplug_max_limit
else
echo "0" >/sys/devices/system/cpu/cpu0/online_control
echo "0" >/sys/devices/system/cpu/cpu1/online_control
echo "0" >/sys/devices/system/cpu/cpu2/online_control
echo "0" >/sys/devices/system/cpu/cpu3/online_control
fi
exit 0
fi
if [ "zzmoove native 2 cores max" == "$2" ]; then
echo "1" >/sys/devices/system/cpu/cpu0/online_control
echo "1" >/sys/devices/system/cpu/cpu1/online_control
echo "1" >/sys/devices/system/cpu/cpu2/online_control
echo "1" >/sys/devices/system/cpu/cpu3/online_control
if [ `busybox cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor | busybox grep zzmoove` ]; then
echo "0" > /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug
echo "0" > /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug_sleep
echo "2" > /sys/devices/system/cpu/cpufreq/zzmoove/hotplug_max_limit
else
echo "0" >/sys/devices/system/cpu/cpu0/online_control
echo "0" >/sys/devices/system/cpu/cpu1/online_control
echo "0" >/sys/devices/system/cpu/cpu2/online_control
echo "0" >/sys/devices/system/cpu/cpu3/online_control
fi
exit 0
fi
if [ "zzmoove native 3 cores max" == "$2" ]; then
echo "1" >/sys/devices/system/cpu/cpu0/online_control
echo "1" >/sys/devices/system/cpu/cpu1/online_control
echo "1" >/sys/devices/system/cpu/cpu2/online_control
echo "1" >/sys/devices/system/cpu/cpu3/online_control
if [ `busybox cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor | busybox grep zzmoove` ]; then
echo "0" > /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug
echo "0" > /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug_sleep
echo "3" > /sys/devices/system/cpu/cpufreq/zzmoove/hotplug_max_limit
else
echo "0" >/sys/devices/system/cpu/cpu0/online_control
echo "0" >/sys/devices/system/cpu/cpu1/online_control
echo "0" >/sys/devices/system/cpu/cpu2/online_control
echo "0" >/sys/devices/system/cpu/cpu3/online_control
fi
exit 0
fi
if [ "zzmoove native 2 cores min" == "$2" ]; then
echo "1" >/sys/devices/system/cpu/cpu0/online_control
echo "1" >/sys/devices/system/cpu/cpu1/online_control
echo "1" >/sys/devices/system/cpu/cpu2/online_control
echo "1" >/sys/devices/system/cpu/cpu3/online_control
if [ `busybox cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor | busybox grep zzmoove` ]; then
echo "0" > /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug
echo "0" > /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug_sleep
echo "2" > /sys/devices/system/cpu/cpufreq/zzmoove/hotplug_min_limit
else
echo "0" >/sys/devices/system/cpu/cpu0/online_control
echo "0" >/sys/devices/system/cpu/cpu1/online_control
echo "0" >/sys/devices/system/cpu/cpu2/online_control
echo "0" >/sys/devices/system/cpu/cpu3/online_control
fi
exit 0
fi
if [ "zzmoove native 3 cores min" == "$2" ]; then
echo "1" >/sys/devices/system/cpu/cpu0/online_control
echo "1" >/sys/devices/system/cpu/cpu1/online_control
echo "1" >/sys/devices/system/cpu/cpu2/online_control
echo "1" >/sys/devices/system/cpu/cpu3/online_control
if [ `busybox cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor | busybox grep zzmoove` ]; then
echo "0" > /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug
echo "0" > /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug_sleep
echo "3" > /sys/devices/system/cpu/cpufreq/zzmoove/hotplug_min_limit
else
echo "0" >/sys/devices/system/cpu/cpu0/online_control
echo "0" >/sys/devices/system/cpu/cpu1/online_control
echo "0" >/sys/devices/system/cpu/cpu2/online_control
echo "0" >/sys/devices/system/cpu/cpu3/online_control
fi
exit 0
fi
if [ "zzmoove native 4 cores min" == "$2" ]; then
echo "1" >/sys/devices/system/cpu/cpu0/online_control
echo "1" >/sys/devices/system/cpu/cpu1/online_control
echo "1" >/sys/devices/system/cpu/cpu2/online_control
echo "1" >/sys/devices/system/cpu/cpu3/online_control
if [ `busybox cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor | busybox grep zzmoove` ]; then
echo "0" > /sys/devices/system/cpu/cpufreq/zzmoove/hotplug_max_limit
echo "0" > /sys/devices/system/cpu/cpufreq/zzmoove/hotplug_min_limit
echo "1" > /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug
echo "1" > /sys/devices/system/cpu/cpufreq/zzmoove/disable_hotplug_sleep
else
echo "0" >/sys/devices/system/cpu/cpu0/online_control
echo "0" >/sys/devices/system/cpu/cpu1/online_control
echo "0" >/sys/devices/system/cpu/cpu2/online_control
echo "0" >/sys/devices/system/cpu/cpu3/online_control
fi
exit 0
fi
exit 0;
fi
if [ "apply_governor_profile" == "$1" ]; then
if [ "ondemand - standard" == "$2" ]; then
echo "3" >/sys/devices/system/cpu/cpufreq/ondemand/down_differential
echo "3" >/sys/devices/system/cpu/cpufreq/ondemand/down_differential_multi_core
echo "0" >/sys/devices/system/cpu/cpufreq/ondemand/ignore_nice_load
echo "0" >/sys/devices/system/cpu/cpufreq/ondemand/input_boost
echo "0" >/sys/devices/system/cpu/cpufreq/ondemand/io_is_busy
echo "300000" >/sys/devices/system/cpu/cpufreq/ondemand/optimal_freq
echo "0" >/sys/devices/system/cpu/cpufreq/ondemand/powersave_bias
echo "1" >/sys/devices/system/cpu/cpufreq/ondemand/sampling_down_factor
echo "100000" >/sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
echo "10000" >/sys/devices/system/cpu/cpufreq/ondemand/sampling_rate_min
echo "300000" >/sys/devices/system/cpu/cpufreq/ondemand/sync_freq
echo "95" >/sys/devices/system/cpu/cpufreq/ondemand/up_threshold
echo "80" >/sys/devices/system/cpu/cpufreq/ondemand/up_threshold_any_cpu_load
echo "80" >/sys/devices/system/cpu/cpufreq/ondemand/up_threshold_multi_core
busybox sleep 0.5s
busybox sync
fi
if [ "conservative - standard" == "$2" ]; then
echo "20" >/sys/devices/system/cpu/cpufreq/conservative/down_threshold
echo "5" >/sys/devices/system/cpu/cpufreq/conservative/freq_step
echo "0" >/sys/devices/system/cpu/cpufreq/conservative/ignore_nice_load
echo "1" >/sys/devices/system/cpu/cpufreq/conservative/sampling_down_factor
echo "200000" >/sys/devices/system/cpu/cpufreq/conservative/sampling_rate
echo "200000" >/sys/devices/system/cpu/cpufreq/conservative/sampling_rate_min
echo "80" >/sys/devices/system/cpu/cpufreq/conservative/up_threshold
busybox sleep 0.5s
busybox sync
fi
if [ "intelliactive - standard" == "$2" ]; then
echo "20000" > /sys/devices/system/cpu/cpufreq/intelliactive/above_hispeed_delay
echo "0" > /sys/devices/system/cpu/cpufreq/intelliactive/boost
echo "" > /sys/devices/system/cpu/cpufreq/intelliactive/boostpulse
echo "80000" > /sys/devices/system/cpu/cpufreq/intelliactive/boostpulse_duration
echo "99" > /sys/devices/system/cpu/cpufreq/intelliactive/go_hispeed_load
echo "1400000" > /sys/devices/system/cpu/cpufreq/intelliactive/hispeed_freq
echo "1" > /sys/devices/system/cpu/cpufreq/intelliactive/io_is_busy
echo "80000" > /sys/devices/system/cpu/cpufreq/intelliactive/min_sample_time
echo "0" > /sys/devices/system/cpu/cpufreq/intelliactive/sampling_down_factor
echo "729600" > /sys/devices/system/cpu/cpufreq/intelliactive/sync_freq
echo "90" > /sys/devices/system/cpu/cpufreq/intelliactive/target_loads
echo "20000" > /sys/devices/system/cpu/cpufreq/intelliactive/timer_rate
echo "80000" > /sys/devices/system/cpu/cpufreq/intelliactive/timer_slack
echo "1728000,1728000,1728000,1728000" > /sys/devices/system/cpu/cpufreq/intelliactive/two_phase_freq
echo "960000" > /sys/devices/system/cpu/cpufreq/intelliactive/up_threshold_any_cpu_freq
echo "95" > /sys/devices/system/cpu/cpufreq/intelliactive/up_threshold_any_cpu_load
busybox sleep 0.5s
busybox sync
fi
if [ "intellidemand - standard" == "$2" ]; then
echo "3" > /sys/devices/system/cpu/cpufreq/intellidemand/down_differential
echo "0" > /sys/devices/system/cpu/cpufreq/intellidemand/ignore_nice_load
echo "1" > /sys/devices/system/cpu/cpufreq/intellidemand/io_is_busy
echo "0" > /sys/devices/system/cpu/cpufreq/intellidemand/powersave_bias
echo "15" > /sys/devices/system/cpu/cpufreq/intellidemand/sampling_down_factor
echo "10000" > /sys/devices/system/cpu/cpufreq/intellidemand/sampling_rate
echo "4294967295" > /sys/devices/system/cpu/cpufreq/intellidemand/sampling_rate_max
echo "10000" > /sys/devices/system/cpu/cpufreq/intellidemand/sampling_rate_min
echo "85" > /sys/devices/system/cpu/cpufreq/intellidemand/up_threshold
busybox sleep 0.5s
busybox sync
fi
if [ "interactive - standard" == "$2" ]; then
echo "0" > /sys/devices/system/cpu/cpufreq/interactive/bk_locked
echo "20000 1400000:40000 1700000:20000" > /sys/devices/system/cpu/cpufreq/interactive/above_hispeed_delay
echo "0" > /sys/devices/system/cpu/cpufreq/interactive/boost
echo "" > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
echo "80000" > /sys/devices/system/cpu/cpufreq/interactive/boostpulse_duration
echo "90" > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
echo "1190400" > /sys/devices/system/cpu/cpufreq/interactive/hispeed_freq
echo "1" > /sys/devices/system/cpu/cpufreq/interactive/io_is_busy
echo "40000" > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
echo "100000" > /sys/devices/system/cpu/cpufreq/interactive/sampling_down_factor
echo "1036800" > /sys/devices/system/cpu/cpufreq/interactive/sync_freq
echo "85 1500000:90 1800000:70" > /sys/devices/system/cpu/cpufreq/interactive/target_loads
echo "30000" > /sys/devices/system/cpu/cpufreq/interactive/timer_rate
echo "20000" > /sys/devices/system/cpu/cpufreq/interactive/timer_slack
echo "1190400" > /sys/devices/system/cpu/cpufreq/interactive/up_threshold_any_cpu_freq
echo "50" > /sys/devices/system/cpu/cpufreq/interactive/up_threshold_any_cpu_load
echo "1" > /sys/devices/system/cpu/cpufreq/interactive/bk_locked
busybox sleep 0.5s
busybox sync
fi
if [ "interactive - battery" == "$2" ]; then
echo "0" > /sys/devices/system/cpu/cpufreq/interactive/bk_locked
echo "20000 1400000:40000 1700000:20000" > /sys/devices/system/cpu/cpufreq/interactive/above_hispeed_delay
echo "0" > /sys/devices/system/cpu/cpufreq/interactive/boost
echo "" > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
echo "80000" > /sys/devices/system/cpu/cpufreq/interactive/boostpulse_duration
echo "95" > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
echo "833200" > /sys/devices/system/cpu/cpufreq/interactive/hispeed_freq
echo "1" > /sys/devices/system/cpu/cpufreq/interactive/io_is_busy
echo "10000" > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
echo "100000" > /sys/devices/system/cpu/cpufreq/interactive/sampling_down_factor
echo "1036800" > /sys/devices/system/cpu/cpufreq/interactive/sync_freq
echo "85 1200000:90 1500000:70" > /sys/devices/system/cpu/cpufreq/interactive/target_loads
echo "50000" > /sys/devices/system/cpu/cpufreq/interactive/timer_rate
echo "20000" > /sys/devices/system/cpu/cpufreq/interactive/timer_slack
echo "1190400" > /sys/devices/system/cpu/cpufreq/interactive/up_threshold_any_cpu_freq
echo "50" > /sys/devices/system/cpu/cpufreq/interactive/up_threshold_any_cpu_load
echo "1" > /sys/devices/system/cpu/cpufreq/interactive/bk_locked
busybox sleep 0.5s
busybox sync
fi
if [ "interactive - battery extreme" == "$2" ]; then
echo "0" > /sys/devices/system/cpu/cpufreq/interactive/bk_locked
echo "20000 1400000:40000 1700000:20000" > /sys/devices/system/cpu/cpufreq/interactive/above_hispeed_delay
echo "0" > /sys/devices/system/cpu/cpufreq/interactive/boost
echo "" > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
echo "80000" > /sys/devices/system/cpu/cpufreq/interactive/boostpulse_duration
echo "100" > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
echo "300000" > /sys/devices/system/cpu/cpufreq/interactive/hispeed_freq
echo "1" > /sys/devices/system/cpu/cpufreq/interactive/io_is_busy
echo "5000" > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
echo "100000" > /sys/devices/system/cpu/cpufreq/interactive/sampling_down_factor
echo "1036800" > /sys/devices/system/cpu/cpufreq/interactive/sync_freq
echo "85 900000:90 1200000:70" > /sys/devices/system/cpu/cpufreq/interactive/target_loads
echo "100000" > /sys/devices/system/cpu/cpufreq/interactive/timer_rate
echo "20000" > /sys/devices/system/cpu/cpufreq/interactive/timer_slack
echo "1190400" > /sys/devices/system/cpu/cpufreq/interactive/up_threshold_any_cpu_freq
echo "50" > /sys/devices/system/cpu/cpufreq/interactive/up_threshold_any_cpu_load
echo "1" > /sys/devices/system/cpu/cpufreq/interactive/bk_locked
busybox sleep 0.5s
busybox sync
fi
if [ "interactive - performance" == "$2" ]; then
echo "0" > /sys/devices/system/cpu/cpufreq/interactive/bk_locked
echo "20000 1400000:40000 1700000:20000" > /sys/devices/system/cpu/cpufreq/interactive/above_hispeed_delay
echo "0" > /sys/devices/system/cpu/cpufreq/interactive/boost
echo "" > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
echo "80000" > /sys/devices/system/cpu/cpufreq/interactive/boostpulse_duration
echo "80" > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
echo "1958400" > /sys/devices/system/cpu/cpufreq/interactive/hispeed_freq
echo "1" > /sys/devices/system/cpu/cpufreq/interactive/io_is_busy
echo "60000" > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
echo "100000" > /sys/devices/system/cpu/cpufreq/interactive/sampling_down_factor
echo "1036800" > /sys/devices/system/cpu/cpufreq/interactive/sync_freq
echo "85 1800000:90 2100000:70" > /sys/devices/system/cpu/cpufreq/interactive/target_loads
echo "20000" > /sys/devices/system/cpu/cpufreq/interactive/timer_rate
echo "20000" > /sys/devices/system/cpu/cpufreq/interactive/timer_slack
echo "1190400" > /sys/devices/system/cpu/cpufreq/interactive/up_threshold_any_cpu_freq
echo "50" > /sys/devices/system/cpu/cpufreq/interactive/up_threshold_any_cpu_load
echo "1" > /sys/devices/system/cpu/cpufreq/interactive/bk_locked
busybox sleep 0.5s
busybox sync
fi
if [ "wheatley - standard" == "$2" ]; then
echo "5" > /sys/devices/system/cpu/cpufreq/wheatley/allowed_misses
echo "0" > /sys/devices/system/cpu/cpufreq/wheatley/ignore_nice_load
echo "0" > /sys/devices/system/cpu/cpufreq/wheatley/io_is_busy
echo "0" > /sys/devices/system/cpu/cpufreq/wheatley/powersave_bias
echo "1" > /sys/devices/system/cpu/cpufreq/wheatley/sampling_down_factor
echo "10000" > /sys/devices/system/cpu/cpufreq/wheatley/sampling_rate
echo "10000" > /sys/devices/system/cpu/cpufreq/wheatley/sampling_rate_min
echo "10000" > /sys/devices/system/cpu/cpufreq/wheatley/target_residency
echo "95" > /sys/devices/system/cpu/cpufreq/wheatley/up_threshold
busybox sleep 0.5s
busybox sync
fi
if [ "smartmax - standard" == "$2" ]; then
echo "1036800" > /sys/devices/system/cpu/cpufreq/smartmax/awake_ideal_freq
echo "1" > /sys/devices/system/cpu/cpufreq/smartmax/boost_duration
echo "1497600" > /sys/devices/system/cpu/cpufreq/smartmax/boost_freq
echo "0" > /sys/devices/system/cpu/cpufreq/smartmax/debug_mask
echo "60000" > /sys/devices/system/cpu/cpufreq/smartmax/down_rate
echo "1" > /sys/devices/system/cpu/cpufreq/smartmax/ignore_nice
echo "150000" > /sys/devices/system/cpu/cpufreq/smartmax/input_boost_duration
echo "0" > /sys/devices/system/cpu/cpufreq/smartmax/io_is_busy
echo "55" > /sys/devices/system/cpu/cpufreq/smartmax/max_cpu_load
echo "5" > /sys/devices/system/cpu/cpufreq/smartmax/min_cpu_load
echo "10000" > /sys/devices/system/cpu/cpufreq/smartmax/min_sampling_rate
echo "200000" > /sys/devices/system/cpu/cpufreq/smartmax/ramp_down_step
echo "1" > /sys/devices/system/cpu/cpufreq/smartmax/ramp_up_during_boost
echo "200000" > /sys/devices/system/cpu/cpufreq/smartmax/ramp_up_step
echo "30000" > /sys/devices/system/cpu/cpufreq/smartmax/sampling_rate
echo "652800" > /sys/devices/system/cpu/cpufreq/smartmax/suspend_ideal_freq
echo "1497600" > /sys/devices/system/cpu/cpufreq/smartmax/touch_poke_freq
echo "30000" > /sys/devices/system/cpu/cpufreq/smartmax/up_rate
busybox sleep 0.5s
busybox sync
fi
if [ "smartmax_eps - standard" == "$2" ]; then
echo "652800" > /sys/devices/system/cpu/cpufreq/smartmax/awake_ideal_freq
echo "0" > /sys/devices/system/cpu/cpufreq/smartmax/boost_duration
echo "1497600" > /sys/devices/system/cpu/cpufreq/smartmax/boost_freq
echo "0" > /sys/devices/system/cpu/cpufreq/smartmax/debug_mask
echo "60000" > /sys/devices/system/cpu/cpufreq/smartmax/down_rate
echo "1" > /sys/devices/system/cpu/cpufreq/smartmax/ignore_nice
echo "90000" > /sys/devices/system/cpu/cpufreq/smartmax/input_boost_duration
echo "0" > /sys/devices/system/cpu/cpufreq/smartmax/io_is_busy
echo "70" > /sys/devices/system/cpu/cpufreq/smartmax/max_cpu_load
echo "40" > /sys/devices/system/cpu/cpufreq/smartmax/min_cpu_load
echo "10000" > /sys/devices/system/cpu/cpufreq/smartmax/min_sampling_rate
echo "200000" > /sys/devices/system/cpu/cpufreq/smartmax/ramp_down_step
echo "1" > /sys/devices/system/cpu/cpufreq/smartmax/ramp_up_during_boost
echo "200000" > /sys/devices/system/cpu/cpufreq/smartmax/ramp_up_step
echo "30000" > /sys/devices/system/cpu/cpufreq/smartmax/sampling_rate
echo "300000" > /sys/devices/system/cpu/cpufreq/smartmax/suspend_ideal_freq
echo "1036800" > /sys/devices/system/cpu/cpufreq/smartmax/touch_poke_freq
echo "30000" > /sys/devices/system/cpu/cpufreq/smartmax/up_rate
busybox sleep 0.5s
busybox sync
fi
if [ "slim - standard" == "$2" ]; then
echo "3" > /sys/devices/system/cpu/cpufreq/slim/down_differential
echo "1" > /sys/devices/system/cpu/cpufreq/slim/gboost
echo "0" > /sys/devices/system/cpu/cpufreq/slim/ignore_nice_load
echo "1728000,1267200,1267200,1267200" > /sys/devices/system/cpu/cpufreq/slim/input_event_min_freq
echo "500" > /sys/devices/system/cpu/cpufreq/slim/input_event_timeout
echo "300000" > /sys/devices/system/cpu/cpufreq/slim/optimal_freq
echo "0" > /sys/devices/system/cpu/cpufreq/slim/powersave_bias
echo "1" > /sys/devices/system/cpu/cpufreq/slim/sampling_down_factor
echo "30000" > /sys/devices/system/cpu/cpufreq/slim/sampling_rate
echo "10000" > /sys/devices/system/cpu/cpufreq/slim/sampling_rate_min
echo "300000" > /sys/devices/system/cpu/cpufreq/slim/sync_freq
echo "1728000,1728000,1728000,1728000" > /sys/devices/system/cpu/cpufreq/slim/two_phase_freq
echo "15000" > /sys/devices/system/cpu/cpufreq/slim/ui_sampling_rate
echo "95" > /sys/devices/system/cpu/cpufreq/slim/up_threshold
echo "90" > /sys/devices/system/cpu/cpufreq/slim/up_threshold_any_cpu_load
echo "90" > /sys/devices/system/cpu/cpufreq/slim/up_threshold_multi_core
busybox sleep 0.5s
busybox sync
fi
if [ "intellimm - standard" == "$2" ]; then
echo "3" > /sys/devices/system/cpu/cpufreq/intellimm/down_differential
echo "3" > /sys/devices/system/cpu/cpufreq/intellimm/down_differential_multi_c
echo "652800" > /sys/devices/system/cpu/cpufreq/intellimm/freq_down_step
echo "1190400" > /sys/devices/system/cpu/cpufreq/intellimm/freq_down_step_barrier
echo "0" > /sys/devices/system/cpu/cpufreq/intellimm/ignore_nice_load
echo "1574400,1574400,1574400,1574400" > /sys/devices/system/cpu/cpufreq/intellimm/input_event_min_freq
echo "0" > /sys/devices/system/cpu/cpufreq/intellimm/io_is_busy
echo "1728000" > /sys/devices/system/cpu/cpufreq/intellimm/optimal_freq
echo "0" > /sys/devices/system/cpu/cpufreq/intellimm/powersave_bias
echo "1" > /sys/devices/system/cpu/cpufreq/intellimm/sampling_down_factor
echo "50000" > /sys/devices/system/cpu/cpufreq/intellimm/sampling_rate
echo "10000" > /sys/devices/system/cpu/cpufreq/intellimm/sampling_rate_min
echo "0" > /sys/devices/system/cpu/cpufreq/intellimm/shortcut
echo "1728000" > /sys/devices/system/cpu/cpufreq/intellimm/two_phase_freq
echo "95" > /sys/devices/system/cpu/cpufreq/intellimm/up_threshold
echo "80" > /sys/devices/system/cpu/cpufreq/intellimm/up_threshold_any_cpu_load
echo "80" > /sys/devices/system/cpu/cpufreq/intellimm/up_threshold_multi_core
busybox sleep 0.5s
busybox sync
fi
if [ "zzmoove - standard" == "$2" ]; then
echo "1" > /sys/devices/system/cpu/cpufreq/zzmoove/profile_number
busybox sleep 0.5s
busybox sync
fi
if [ "zzmoove - battery" == "$2" ]; then
echo "4" > /sys/devices/system/cpu/cpufreq/zzmoove/profile_number
busybox sleep 0.5s
busybox sync
fi
if [ "zzmoove - optimal" == "$2" ]; then
echo "6" > /sys/devices/system/cpu/cpufreq/zzmoove/profile_number
busybox sleep 0.5s
busybox sync
fi
if [ "zzmoove - performance" == "$2" ]; then
echo "8" > /sys/devices/system/cpu/cpufreq/zzmoove/profile_number
busybox sleep 0.5s
busybox sync
fi
if [ "zzmoove - battery extreme yank" == "$2" ]; then
echo "3" > /sys/devices/system/cpu/cpufreq/zzmoove/profile_number
busybox sleep 0.5s
busybox sync
fi
if [ "zzmoove - battery yank" == "$2" ]; then
echo "2" > /sys/devices/system/cpu/cpufreq/zzmoove/profile_number
busybox sleep 0.5s
busybox sync
fi
if [ "zzmoove - insane" == "$2" ]; then
echo "9" > /sys/devices/system/cpu/cpufreq/zzmoove/profile_number
busybox sleep 0.5s
busybox sync
fi
if [ "zzmoove - battery plus" == "$2" ]; then
echo "5" > /sys/devices/system/cpu/cpufreq/zzmoove/profile_number
busybox sleep 0.5s
busybox sync
fi
if [ "zzmoove - moderate" == "$2" ]; then
echo "7" > /sys/devices/system/cpu/cpufreq/zzmoove/profile_number
busybox sleep 0.5s
busybox sync
fi
if [ "zzmoove - game" == "$2" ]; then
echo "10" > /sys/devices/system/cpu/cpufreq/zzmoove/profile_number
busybox sleep 0.5s
busybox sync
fi
if [ "zzmoove - relax" == "$2" ]; then
echo "11" > /sys/devices/system/cpu/cpufreq/zzmoove/profile_number
busybox sleep 0.5s
busybox sync
fi
if [ "pegasusq - standard" == "$2" ]; then
echo "5" > /sys/devices/system/cpu/cpufreq/pegasusq/down_differential
echo "2265600" > /sys/devices/system/cpu/cpufreq/pegasusq/freq_for_responsiveness
echo "37" > /sys/devices/system/cpu/cpufreq/pegasusq/freq_step
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusq/ignore_nice_load
echo "0" > /sys/devices/system/cpu/cpufreq/pegasusq/io_is_busy
echo "2" > /sys/devices/system/cpu/cpufreq/pegasusq/sampling_down_factor
echo "50000" > /sys/devices/system/cpu/cpufreq/pegasusq/sampling_rate
echo "10000" > /sys/devices/system/cpu/cpufreq/pegasusq/sampling_rate_min
echo "85" > /sys/devices/system/cpu/cpufreq/pegasusq/up_threshold
echo "40" > /sys/devices/system/cpu/cpufreq/pegasusq/up_threshold_at_min_freq
busybox sleep 0.5s
busybox sync
fi
if [ "lionheart - standard" == "$2" ]; then
echo "30" > /sys/devices/system/cpu/cpufreq/lionheart/down_threshold
echo "5" > /sys/devices/system/cpu/cpufreq/lionheart/freq_step
echo "0" > /sys/devices/system/cpu/cpufreq/lionheart/ignore_nice_load
echo "1" > /sys/devices/system/cpu/cpufreq/lionheart/sampling_down_factor
echo "10000" > /sys/devices/system/cpu/cpufreq/lionheart/sampling_rate
echo "10000" > /sys/devices/system/cpu/cpufreq/lionheart/sampling_rate_min
echo "65" > /sys/devices/system/cpu/cpufreq/lionheart/up_threshold
busybox sleep 0.5s
busybox sync
fi
if [ "nightmare - standard" == "$2" ]; then
echo "50" > /sys/devices/system/cpu/cpufreq/nightmare/dec_cpu_load
echo "540000" > /sys/devices/system/cpu/cpufreq/nightmare/freq_for_responsiveness
echo "1890000" > /sys/devices/system/cpu/cpufreq/nightmare/freq_for_responsiveness_max
echo "20" > /sys/devices/system/cpu/cpufreq/nightmare/freq_step
echo "20" > /sys/devices/system/cpu/cpufreq/nightmare/freq_step_at_min_freq
echo "10" > /sys/devices/system/cpu/cpufreq/nightmare/freq_step_dec
echo "10" > /sys/devices/system/cpu/cpufreq/nightmare/freq_step_dec_at_max_freq
echo "30" > /sys/devices/system/cpu/cpufreq/nightmare/freq_up_brake
echo "30" > /sys/devices/system/cpu/cpufreq/nightmare/freq_up_brake_at_min_freq
echo "70" > /sys/devices/system/cpu/cpufreq/nightmare/inc_cpu_load
echo "60" > /sys/devices/system/cpu/cpufreq/nightmare/inc_cpu_load_at_min_freq
echo "60000" > /sys/devices/system/cpu/cpufreq/nightmare/sampling_rate
busybox sleep 0.5s
busybox sync
fi
if [ "impulse - standard" == "$2" ]; then
echo "20000" > /sys/devices/system/cpu/cpufreq/impulse/above_hispeed_delay
echo "1" > /sys/devices/system/cpu/cpufreq/impulse/align_windows
echo "0" > /sys/devices/system/cpu/cpufreq/impulse/boost
echo "0" > /sys/devices/system/cpu/cpufreq/impulse/boostpulse
echo "80000" > /sys/devices/system/cpu/cpufreq/impulse/boostpulse_duration
echo "99" > /sys/devices/system/cpu/cpufreq/impulse/go_hispeed_load
echo "5" > /sys/devices/system/cpu/cpufreq/impulse/go_lowspeed_load
echo "1958400" > /sys/devices/system/cpu/cpufreq/impulse/hispeed_freq
echo "0" > /sys/devices/system/cpu/cpufreq/impulse/max_freq_hysteresis
echo "80000" > /sys/devices/system/cpu/cpufreq/impulse/min_sample_time
echo "0" > /sys/devices/system/cpu/cpufreq/impulse/powersave_bias
echo "90" > /sys/devices/system/cpu/cpufreq/impulse/target_loads
echo "20000" > /sys/devices/system/cpu/cpufreq/impulse/timer_rate
echo "80000" > /sys/devices/system/cpu/cpufreq/impulse/timer_slack
busybox sleep 0.5s
busybox sync
fi
if [ "ondemandplus - standard" == "$2" ]; then
echo "20" >/sys/devices/system/cpu/cpufreq/ondemandplus/down_differential
echo "1728000" >/sys/devices/system/cpu/cpufreq/ondemandplus/inter_hifreq
echo "300000" >/sys/devices/system/cpu/cpufreq/ondemandplus/inter_lofreq
echo "2" >/sys/devices/system/cpu/cpufreq/ondemandplus/inter_staycycles
echo "0" >/sys/devices/system/cpu/cpufreq/ondemandplus/io_is_busy
echo "652800" >/sys/devices/system/cpu/cpufreq/ondemandplus/staycycles_resetfreq
echo "20000" >/sys/devices/system/cpu/cpufreq/ondemandplus/timer_rate
echo "70" >/sys/devices/system/cpu/cpufreq/ondemandplus/up_threshold
busybox sleep 0.5s
busybox sync
fi
if [ "yankactive - standard" == "$2" ]; then
echo "80000" >/sys/devices/system/cpu/cpufreq/yankactive/above_hispeed_delay
echo "0" >/sys/devices/system/cpu/cpufreq/yankactive/boost
echo "20000" >/sys/devices/system/cpu/cpufreq/yankactive/boostpulse_duration
echo "99" >/sys/devices/system/cpu/cpufreq/yankactive/go_hispeed_load
echo "1728000" >/sys/devices/system/cpu/cpufreq/yankactive/hispeed_freq
echo "0" >/sys/devices/system/cpu/cpufreq/yankactive/io_is_busy
echo "20000" >/sys/devices/system/cpu/cpufreq/yankactive/min_sample_time
echo "0" >/sys/devices/system/cpu/cpufreq/yankactive/sampling_down_factor
echo "0" >/sys/devices/system/cpu/cpufreq/yankactive/sync_freq
echo "95" >/sys/devices/system/cpu/cpufreq/yankactive/target_loads
echo "20000" >/sys/devices/system/cpu/cpufreq/yankactive/timer_rate
echo "80000" >/sys/devices/system/cpu/cpufreq/yankactive/timer_slack
echo "0" >/sys/devices/system/cpu/cpufreq/yankactive/up_threshold_any_cpu_freq
echo "0" >/sys/devices/system/cpu/cpufreq/yankactive/up_threshold_any_cpu_load
busybox sleep 0.5s
busybox sync
fi
exit 0
fi
if [ "apply_system_tweaks" == "$1" ]; then
if [ "Off" == "$2" ]; then
echo "5" > /proc/sys/vm/dirty_background_ratio
echo "200" > /proc/sys/vm/dirty_expire_centisecs
echo "20" > /proc/sys/vm/dirty_ratio
echo "500" > /proc/sys/vm/dirty_writeback_centisecs
echo "3008" > /proc/sys/vm/min_free_kbytes
echo "130" > /proc/sys/vm/swappiness
echo "100" > /proc/sys/vm/vfs_cache_pressure
echo "0" > /proc/sys/vm/drop_caches
busybox sleep 0.5s
busybox sync
fi
if [ "Boeffla tweaks" == "$2" ]; then
echo "70" > /proc/sys/vm/dirty_background_ratio
echo "250" > /proc/sys/vm/dirty_expire_centisecs
echo "90" > /proc/sys/vm/dirty_ratio
echo "500" > /proc/sys/vm/dirty_writeback_centisecs
echo "4096" > /proc/sys/vm/min_free_kbytes
echo "130" > /proc/sys/vm/swappiness
echo "10" > /proc/sys/vm/vfs_cache_pressure
echo "3" > /proc/sys/vm/drop_caches
busybox sleep 0.5s
busybox sync
fi
if [ "Speedmod tweaks" == "$2" ]; then
echo "5" > /proc/sys/vm/dirty_background_ratio
echo "200" > /proc/sys/vm/dirty_expire_centisecs
echo "20" > /proc/sys/vm/dirty_ratio
echo "1500" > /proc/sys/vm/dirty_writeback_centisecs
echo "12288" > /proc/sys/vm/min_free_kbytes
echo "0" > /proc/sys/vm/swappiness
echo "100" > /proc/sys/vm/vfs_cache_pressure
echo "0" > /proc/sys/vm/drop_caches
busybox sleep 0.5s
busybox sync
fi
if [ "Mattiadj tweaks" == "$2" ]; then
echo "10" > /proc/sys/vm/dirty_background_ratio
echo "500" > /proc/sys/vm/dirty_expire_centisecs
echo "10" > /proc/sys/vm/dirty_ratio
echo "100" > /proc/sys/vm/dirty_writeback_centisecs
echo "8192" > /proc/sys/vm/min_free_kbytes
echo "150" > /proc/sys/vm/swappiness
echo "500" > /proc/sys/vm/vfs_cache_pressure
echo "0" > /proc/sys/vm/drop_caches
busybox sleep 0.5s
busybox sync
fi
exit 0
fi
if [ "apply_eq_bands" == "$1" ]; then
# echo "1 4027 1031 0 276" > /sys/class/misc/boeffla_sound/eq_bands
# echo "2 8076 61555 456 456" > /sys/class/misc/boeffla_sound/eq_bands
# echo "3 7256 62323 2644 1368" > /sys/class/misc/boeffla_sound/eq_bands
# echo "4 5774 63529 1965 4355" > /sys/class/misc/boeffla_sound/eq_bands
# echo "5 1380 1369 0 16384" > /sys/class/misc/boeffla_sound/eq_bands
exit 0
fi
if [ "apply_ext4_tweaks" == "$1" ]; then
if [ "1" == "$2" ]; then
busybox sync
mount -o remount,commit=20,noatime $CACHE_DEVICE /cache
busybox sync
mount -o remount,commit=20,noatime $DATA_DEVICE /data
busybox sync
fi
if [ "0" == "$2" ]; then
busybox sync
mount -o remount,commit=0,noatime $CACHE_DEVICE /cache
busybox sync
mount -o remount,commit=0,noatime $DATA_DEVICE /data
busybox sync
fi
exit 0
fi
if [ "apply_survival_script" == "$1" ]; then
if [ "1" == "$2" ]; then
mount -o remount,rw -t ext4 $SYSTEM_DEVICE /system
busybox mkdir -p /system/addon.d
busybox cp /res/misc/97-boeffla-kernel.sh /system/addon.d
busybox chmod 755 /system/addon.d/97-boeffla-kernel.sh
busybox sync
mount -o remount,ro -t ext4 $SYSTEM_DEVICE /system
fi
if [ "0" == "$2" ]; then
mount -o remount,rw -t ext4 $SYSTEM_DEVICE /system
busybox rm /system/addon.d/97-boeffla-kernel.sh
busybox sync
mount -o remount,ro -t ext4 $SYSTEM_DEVICE /system
fi
exit 0
fi
if [ "apply_zram" == "$1" ]; then
busybox swapoff /dev/block/vnswap0
busybox sync
busybox sleep 0.2s
if [ "1" == "$2" ]; then
echo "$4" > /sys/block/vnswap0/disksize
busybox mkswap /dev/block/vnswap0
busybox sleep 0.2s
busybox sync
busybox swapon -p 2 /dev/block/vnswap0
busybox sleep 0.1s
busybox sync
echo "130" > /proc/sys/vm/swappiness
fi
exit 0
fi
if [ "apply_cifs" == "$1" ]; then
if [ "1" == "$2" ]; then
insmod $LIBPATH/cifs.ko
fi
if [ "0" == "$2" ]; then
rmmod $LIBPATH/cifs.ko
fi
exit 0
fi
if [ "apply_nfs" == "$1" ]; then
if [ "1" == "$2" ]; then
insmod $LIBPATH/sunrpc.ko
insmod $LIBPATH/auth_rpcgss.ko
insmod $LIBPATH/lockd.ko
insmod $LIBPATH/nfs.ko
fi
if [ "0" == "$2" ]; then
rmmod $LIBPATH/nfs.ko
rmmod $LIBPATH/lockd.ko
rmmod $LIBPATH/auth_rpcgss.ko
rmmod $LIBPATH/sunrpc.ko
fi
exit 0
fi
if [ "apply_xbox" == "$1" ]; then
if [ "1" == "$2" ]; then
insmod $LIBPATH/xpad.ko
fi
if [ "0" == "$2" ]; then
rmmod $LIBPATH/xpad.ko
fi
exit 0
fi
if [ "apply_exfat" == "$1" ]; then
if [ "1" == "$2" ]; then
insmod $LIBPATH/exfat_core.ko
insmod $LIBPATH/exfat_fs.ko
fi
if [ "0" == "$2" ]; then
rmmod $LIBPATH/exfat_fs.ko
rmmod $LIBPATH/exfat_core.ko
fi
exit 0
fi
if [ "apply_usb_ethernet" == "$1" ]; then
if [ "1" == "$2" ]; then
insmod $LIBPATH/asix.ko
netcfg eth0 up
dhcpcd eth0
DNS=`getprop net.eth0.dns1`
ndc resolver setifdns eth0 "" $DNS 8.8.8.8
ndc resolver setdefaultif eth0
fi
if [ "0" == "$2" ]; then
rmmod $LIBPATH/asix.ko
netcfg eth0 down
fi
exit 0
fi
if [ "apply_ntfs" == "$1" ]; then
if [ "1" == "$2" ]; then
insmod $LIBPATH/ntfs.ko
fi
if [ "0" == "$2" ]; then
rmmod $LIBPATH/ntfs.ko
fi
exit 0
fi
if [ "apply_ums" == "$1" ]; then
# if [ "1" == "$2" ]; then
# busybox umount -l /mnt/extSdCard
# busybox umount -l /storage/sdcard1
# busybox umount -l /mnt/media_rw/sdcard1
# busybox umount -l /mnt/secure/asec
# /system/bin/setprop persist.sys.usb.config mass_storage,adb
# echo /dev/block/vold/179:49 > /sys/devices/platform/s3c-usbgadget/gadget/lun0/file
# fi
#
# if [ "0" == "$2" ]; then
# echo "" > /sys/devices/platform/s3c-usbgadget/gadget/lun0/file
# /system/bin/vold
# /system/bin/setprop persist.sys.usb.config mtp,adb
# fi
exit 0
fi
# *******************
# Actions
# *******************
if [ "action_debug_info_file" == "$1" ]; then
echo $(date) Full debug log file start > $2
echo -e "\n============================================\n" >> $2
echo -e "\n**** Boeffla-Kernel version\n" >> $2
cat /proc/version >> $2
echo -e "\n**** Firmware information\n" >> $2
busybox grep ro.build.version /system/build.prop >> $2
echo -e "\n============================================\n" >> $2
echo -e "\n**** Boeffla-Kernel log\n" >> $2
cat /sdcard/boeffla-kernel-data/boeffla-kernel.log >> $2
echo -e "\n**** Boeffla-Kernel log 1\n" >> $2
cat /sdcard/boeffla-kernel-data/boeffla-kernel.log.1 >> $2
echo -e "\n**** Boeffla-Kernel log 2\n" >> $2
cat /sdcard/boeffla-kernel-data/boeffla-kernel.log.2 >> $2
echo -e "\n**** Boeffla-Kernel log 3\n" >> $2
cat /sdcard/boeffla-kernel-data/boeffla-kernel.log.3 >> $2
echo -e "\n============================================\n" >> $2
echo -e "\n**** Boeffla-Config app log\n" >> $2
cat /sdcard/boeffla-kernel-data/bc.log >> $2
echo -e "\n**** Boeffla-Config app log 1\n" >> $2
cat /sdcard/boeffla-kernel-data/bc.log.1 >> $2
echo -e "\n**** Boeffla-Config app log 2\n" >> $2
cat /sdcard/boeffla-kernel-data/bc.log.2 >> $2
echo -e "\n**** Boeffla-Config app log 3\n" >> $2
cat /sdcard/boeffla-kernel-data/bc.log.3 >> $2
echo -e "\n**** Boeffla-Config crash log\n" >> $2
cat /sdcard/boeffla-kernel-data/bc.crashlog >> $2
echo -e "\n============================================\n" >> $2
echo -e "\n**** boeffla_sound\n" >> $2
cd /sys/class/misc/boeffla_sound
busybox find * -print -maxdepth 0 -type f -exec tail -v -n +1 {} + >> $2
echo "\n============================================\n" >> $2
echo -e "\n**** SELinux:\n" >> $2
getenforce >> $2
echo -e "\n**** Loaded modules:\n" >> $2
lsmod >> $2
echo -e "\n**** CPU information:\n" >> $2
cd /sys/devices/system/cpu/cpu0/cpufreq
busybox find * -print -maxdepth 0 -type f -exec tail -v -n +1 {} + >> $2
cd /sys/devices/system/cpu/cpu1/cpufreq
busybox find * -print -maxdepth 0 -type f -exec tail -v -n +1 {} + >> $2
cd /sys/devices/system/cpu/cpu2/cpufreq
busybox find * -print -maxdepth 0 -type f -exec tail -v -n +1 {} + >> $2
cd /sys/devices/system/cpu/cpu3/cpufreq
busybox find * -print -maxdepth 0 -type f -exec tail -v -n +1 {} + >> $2
echo -e "\n**** GPU information:\n" >> $2
echo -e "\n**** Root:\n" >> $2
ls /system/xbin/su >> $2
ls /system/app/Superuser.apk >> $2
echo -e "\n**** Busybox:\n" >> $2
ls /sbin/busybox >> $2
ls /system/bin/busybox >> $2
ls /system/xbin/busybox >> $2
echo -e "\n**** Mounts:\n" >> $2
mount | busybox grep /data >> $2
mount | busybox grep /cache >> $2
echo -e "\n**** SD Card read ahead:\n" >> $2
cat /sys/block/mmcblk0/bdi/read_ahead_kb >> $2
cat /sys/block/mmcblk1/bdi/read_ahead_kb >> $2
echo -e "\n**** Various kernel settings by config app:\n" >> $2
echo -e "\n(gov prof, cpu volt prof, gpu freq prof, gpu volt prof, eq prof, mdnie over, sys tweaks, swapp over)\n" >> $2
cat /dev/bk_governor_profile >> $2
cat /dev/bk_cpu_voltages_profile >> $2
cat /dev/bk_gpu_frequencies_profile >> $2
cat /dev/bk_gpu_voltages_profile >> $2
cat /dev/bk_system_tweaks >> $2
cat /dev/bk_swappiness_overwrite >> $2
echo -e "\n**** Touch boost:\n" >> $2
cd /sys/class/misc/touchboost_switch
busybox find * -print -maxdepth 0 -type f -exec tail -v -n +1 {} + >> $2
echo -e "\n**** Charging levels (ac/usb/wireless) and Charging instable power / ignore safety margin:\n" >> $2
cd /sys/kernel/charge_levels
busybox find * -print -maxdepth 0 -type f -exec tail -v -n +1 {} + >> $2
echo -e "\n**** Governor:\n" >> $2
cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor >> $2
cat /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor >> $2
cat /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor >> $2
cat /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor >> $2
echo -e "\n**** Governor hard:\n" >> $2
cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor_hard >> $2
cat /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor_hard >> $2
cat /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor_hard >> $2
cat /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor_hard >> $2
echo -e "\n**** Scheduler:\n" >> $2
cat /sys/block/mmcblk0/queue/scheduler >> $2
cat /sys/block/mmcblk1/queue/scheduler >> $2
echo -e "\n**** Scheduler hard:\n" >> $2
cat /sys/block/mmcblk0/queue/scheduler_hard >> $2
cat /sys/block/mmcblk1/queue/scheduler_hard >> $2
echo -e "\n**** Kernel Logger:\n" >> $2
cat /sys/kernel/printk_mode/printk_mode >> $2
echo -e "\n**** Android Logger:\n" >> $2
cat /sys/kernel/logger_mode/logger_mode >> $2
echo -e "\n**** zRam information:\n" >> $2
busybox find /sys/block/zram*/* -print -maxdepth 0 -type f -exec tail -v -n +1 {} + >> $2
echo -e "\n**** Uptime:\n" >> $2
cat /proc/uptime >> $2
echo -e "\n**** Frequency usage table:\n" >> $2
cat /sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state >> $2
echo -e "\n**** Memory:\n" >> $2
busybox free -m >> $2
echo -e "\n**** Meminfo:\n" >> $2
cat /proc/meminfo >> $2
echo -e "\n**** Swap:\n" >> $2
cat /proc/swaps >> $2
echo -e "\n**** Low memory killer:\n" >> $2
cat /sys/module/lowmemorykiller/parameters/minfree >> $2
echo -e "\n**** Swappiness:\n" >> $2
cat /proc/sys/vm/swappiness >> $2
echo -e "\n**** Storage:\n" >> $2
busybox df >> $2
echo -e "\n**** Mounts:\n" >> $2
mount >> $2
echo -e "\n**** Governor tuneables\n" >> $2
GOVERNOR=`cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor`
cd /sys/devices/system/cpu/cpufreq/$GOVERNOR
busybox find * -print -maxdepth 0 -type f -exec tail -v -n +1 {} + >> $2
echo -e "\n============================================\n" >> $2
echo -e "\n**** /data/app folder\n" >> $2
ls -l /data/app >> $2
echo -e "\n**** /system/app folder\n" >> $2
ls -l /system/app >> $2
echo -e "\n============================================\n" >> $2
echo -e "\n**** /system/etc/init.d folder\n" >> $2
ls -l /system/etc/init.d >> $2
echo -e "\n**** /etc/init.d folder\n" >> $2
ls -l /etc/init.d >> $2
echo -e "\n**** /data/init.d folder\n" >> $2
ls -l /data/init.d >> $2
echo -e "\n============================================\n" >> $2
echo -e "\n**** last_kmsg\n" >> $2
cat /proc/last_kmsg >> $2
echo -e "\n============================================\n" >> $2
echo -e "\n**** dmesg\n" >> $2
dmesg >> $2
echo -e "\n============================================\n" >> $2
echo $(date) Full debug log file end >> $2
busybox chmod 666 $2
exit 0
fi
if [ "action_reboot" == "$1" ]; then
echo 0 > /sys/kernel/dyn_fsync/Dyn_fsync_active
busybox sync
busybox sleep 1s
/system/bin/reboot
exit 0
fi
if [ "action_reboot_cwm" == "$1" ]; then
echo 0 > /sys/kernel/dyn_fsync/Dyn_fsync_active
busybox sync
busybox sleep 1s
/system/bin/reboot recovery
exit 0
fi
if [ "action_reboot_download" == "$1" ]; then
echo 0 > /sys/kernel/dyn_fsync/Dyn_fsync_active
busybox sync
busybox sleep 1s
/system/bin/reboot download
exit 0
fi
if [ "action_wipe_caches_reboot" == "$1" ]; then
echo 0 > /sys/kernel/dyn_fsync/Dyn_fsync_active
busybox rm -rf /cache/*
busybox rm -rf /data/dalvik-cache/*
busybox sync
busybox sleep 1s
/system/bin/reboot
exit 0
fi
if [ "action_wipe_cache" == "$1" ]; then
busybox rm -rf /cache/*
busybox sync
busybox sleep 1s
exit 0
fi
if [ "action_wipe_clipboard_cache" == "$1" ]; then
busybox rm -rf /data/clipboard/*
busybox sync
exit 0
fi
if [ "action_clean_initd" == "$1" ]; then
busybox tar cvz -f $2 /system/etc/init.d
mount -o remount,rw -t ext4 $SYSTEM_DEVICE /system
busybox rm /system/etc/init.d/*
busybox sync
mount -o remount,ro -t ext4 $SYSTEM_DEVICE /system
exit 0
fi
if [ "action_fix_permissions" == "$1" ]; then
mount -o remount,rw -t ext4 $SYSTEM_DEVICE /system
# User apps
busybox chmod 644 /data/app/*.apk
busybox chown 1000:1000 /data/app/*.apk
# System apps
busybox chmod 644 /system/app/*.apk
busybox chown 0:0 /system/app/*.apk
# System framework
busybox chmod 644 /system/framework/*.apk
busybox chown 0:0 /system/framework/*.apk
busybox chmod 644 /system/framework/*.jar
busybox chown 0:0 /system/framework/*.jar
mount -o remount,ro -t ext4 $SYSTEM_DEVICE /system
busybox sync
exit 0
fi
if [ "action_fstrim" == "$1" ]; then
echo -e "Trim /data"
/sbin/busybox fstrim -v /data
echo -e ""
echo -e "Trim /cache"
/sbin/busybox fstrim -v /cache
echo -e ""
echo -e "Trim /system"
/sbin/busybox fstrim -v /system
echo -e ""
busybox sync
exit 0
fi
if [ "flash_kernel" == "$1" ]; then
setenforce 0
busybox dd if=$2 of=$BOOT_DEVICE
exit 0
fi
if [ "archive_kernel" == "$1" ]; then
IMGPATH=$2
cd ${IMGPATH%/*}
busybox rm $3.tar
busybox rm $3.tar.md5
busybox tar cvf $3.tar ${IMGPATH##*/}
busybox md5sum $3.tar >> $3.tar
busybox mv $3.tar $3.tar.md5
busybox chmod 666 $3.tar.md5
busybox rm $2
busybox sync
exit 0
fi
if [ "extract_kernel" == "$1" ]; then
busybox tar -xvf $2 -C $3
exit 0
fi
if [ "flash_recovery" == "$1" ]; then
setenforce 0
busybox dd if=$2 of=$RECOVERY_DEVICE
exit 0
fi
if [ "extract_recovery" == "$1" ]; then
busybox tar -xvf $2 -C $3
exit 0
fi
if [ "flash_modem" == "$1" ]; then
setenforce 0
busybox dd if=$2 of=$RADIO_DEVICE
exit 0
fi
if [ "extract_modem" == "$1" ]; then
busybox tar -xvf $2 -C $3
exit 0
fi
if [ "flash_cm_kernel" == "$1" ]; then
setenforce 0
busybox dd if=$2/boot.img of=$BOOT_DEVICE
mount -o remount,rw -t ext4 $SYSTEM_DEVICE /system
busybox mkdir -p $LIBPATH
busybox chmod 755 $LIBPATH
busybox rm -f $LIBPATH/*
busybox cp $2$LIBPATH/* $LIBPATH
busybox chmod 644 $LIBPATH/*
busybox sync
mount -o remount,ro -t ext4 $SYSTEM_DEVICE /system
exit 0
fi
if [ "extract_cm_kernel" == "$1" ]; then
busybox unzip $2 -d $3
exit 0
fi
|
andip71/boeffla-kernel-samsung-s5
|
ramdisk_boeffla/fs/res/bc/bccontroller.sh
|
Shell
|
gpl-2.0
| 58,261 |
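For reference, a hedged sketch of how the controller script above is typically invoked — one command name in $1 and a value in $2; the /res/bc/ install path is an assumption based on the file's path in the repo:

# Apply the "Boeffla tweaks" VM profile (sketch; assumed path)
sh /res/bc/bccontroller.sh apply_system_tweaks "Boeffla tweaks"
# Enable zRAM; the script reads the enable flag from $2 and the disk size from $4
sh /res/bc/bccontroller.sh apply_zram 1 "" 1073741824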
ID=$(sudo docker run --privileged -d -p 22 joshjdevl/firefox /usr/sbin/sshd -D)
ID=${ID:0:12}
echo $ID
#PORT=$(sudo docker port $ID 22)
PORT=$(sudo docker inspect $ID | grep HostPort | tail -1 | awk '{print $2}' | tr -d '",\n')
ssh -X chromium@localhost -p ${PORT}
|
joshjdevl/docker-firefox
|
ssh.sh
|
Shell
|
gpl-2.0
| 270 |
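A sturdier way to recover the mapped port is `docker port`, which prints HOST:PORT directly instead of scraping `docker inspect` output; a sketch:

ID=$(sudo docker run --privileged -d -p 22 joshjdevl/firefox /usr/sbin/sshd -D)
PORT=$(sudo docker port "$ID" 22 | cut -d: -f2)   # "0.0.0.0:49153" -> "49153"
ssh -X chromium@localhost -p "$PORT"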
#!/usr/bin/env bash
filename=$1
if [ -z "$filename" ]
then # test if the parameter exists
filename="default_map"
fi
if [ -e "$filename.yaml" ]
then # test if the file already exists
echo "$filename.yaml already exists. overwrite (Y/N)"
read answer
if test "$answer" != "Y" -a "$answer" != "y"
then exit 0;
fi
fi
# the filename does not exist or overwrite is requested
rosrun map_server map_saver -f $filename
|
robotology/ikart
|
app/iKartRos/launch/save_map.sh
|
Shell
|
gpl-2.0
| 433 |
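Example runs (the map name is hypothetical); `map_saver` writes the occupancy grid as a .pgm image alongside the .yaml metadata:

./save_map.sh office_map   # saves office_map.pgm and office_map.yaml
./save_map.sh              # falls back to default_map, prompting before overwrite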
#!/bin/bash
mkdir logs/
MATLAB_INCLUDE="/usr/local/MATLAB/R2013b/extern/include"
MATLAB_LIB="/usr/local/MATLAB/R2013b/bin/glnxa64"
MATLAB_MAP_FILE="/usr/local/MATLAB/R2013b/extern/lib/glnxa64/mexFunction.map"
SOCKET_INCLUDE="../socket_functions"
icc -fpic -shared -DMATLAB_MEX_FILE -fno-omit-frame-pointer -pthread -I $MATLAB_INCLUDE -I $SOCKET_INCLUDE svd_mex_client.c "$SOCKET_INCLUDE"/socket_functions.c -L"$MATLAB_LIB" -Wl,--version-script,"$MATLAB_MAP_FILE" -lmex -lmx -lmat -lm -o svd_mex_client.mexa64
icc -mkl -openmp -I "$SOCKET_INCLUDE" svd_with_socket_server_intel_mkl.c "$SOCKET_INCLUDE"/socket_functions.c matrix_vector_functions_intel_mkl.c -o svd_with_socket_server_intel_mkl
icc -fpic -shared -DMATLAB_MEX_FILE -fno-omit-frame-pointer -pthread -I $MATLAB_INCLUDE -I $SOCKET_INCLUDE qr_full_rank_mex_client.c "$SOCKET_INCLUDE"/socket_functions.c -L"$MATLAB_LIB" -Wl,--version-script,"$MATLAB_MAP_FILE" -lmex -lmx -lmat -lm -o qr_full_rank_mex_client.mexa64
icc -mkl -openmp -I "$SOCKET_INCLUDE" qr_full_rank_with_socket_server_intel_mkl.c "$SOCKET_INCLUDE"/socket_functions.c matrix_vector_functions_intel_mkl.c -o qr_full_rank_with_socket_server_intel_mkl
|
sergeyvoronin/MatlabMexSockets
|
example_functions_mkl/compile.sh
|
Shell
|
gpl-2.0
| 1,186 |
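A small preflight check would make failures clearer than a cryptic icc error; a sketch reusing the same variables as the script above:

command -v icc >/dev/null 2>&1 || { echo "icc not on PATH"; exit 1; }
for d in "$MATLAB_INCLUDE" "$MATLAB_LIB" "$SOCKET_INCLUDE"; do
    [ -d "$d" ] || { echo "missing directory: $d"; exit 1; }
done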
#!/res/busybox sh
export PATH=/res/asset:$PATH
echo "1" > /sys/module/intelli_plug/parameters/intelli_plug_active
echo "1" > /sys/module/msm_thermal/parameters/enabled
echo "0" > /sys/module/msm_thermal/core_control/enabled
echo "1" > /sys/devices/system/cpu/cpu1/online
echo "1" > /sys/devices/system/cpu/cpu2/online
echo "1" > /sys/devices/system/cpu/cpu3/online
source /sbin/cpu.sh
source /sbin/arteractive.sh
DEFAULTPOLLMS=$(cat /sys/module/msm_thermal/parameters/poll_ms)
echo "50" > /sys/module/msm_thermal/parameters/poll_ms
if [[ $(cat /data/.arter97/vnswap) == "1" ]]; then
/sbin/sswap -s
fi
while ! pgrep com.android ; do
sleep 1
done
sleep 1
while pgrep bootanimation ; do
sleep 1
done
sleep 1
while pgrep dexopt ; do
sleep 1
done
echo "1" > /sys/module/msm_thermal/core_control/enabled
echo "$DEFAULTPOLLMS" > /sys/module/msm_thermal/parameters/poll_ms
source /sbin/arteractive.sh
if [ -e /arter97 ] ; then
fstrim -v /arter97/data
else
fstrim -v /system
fstrim -v /cache
fstrim -v /data
fi
sync
echo "1" > /proc/sys/vm/compact_memory
|
1N4148/android_kernel_samsung_msm8974
|
ramdisk/sbin/boot.sh
|
Shell
|
gpl-2.0
| 1,068 |
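The vnswap branch above is gated on a flag file; a hedged example of toggling it from a root shell (the path is taken from the script itself):

mkdir -p /data/.arter97
echo 1 > /data/.arter97/vnswap   # next boot runs /sbin/sswap -s
echo 0 > /data/.arter97/vnswap   # disable again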
#!/bin/bash
# Script run at system startup. Keeps retrying until the mounts succeed.
if [ -f /tmp/sshfsmonted ] ; then
echo "Already mounted."
exit 0
else
is_not_mounted=1
while [ $is_not_mounted -ne 0 ] ; do
sshfs jpasz@jpaszpc:Dokumenty/Skany Dokumenty/Skany -o rw
sshfs jpasz@jpaszpc:Udostepnione /home/spasz/Udostepnione/ -o rw
is_not_mounted=$?
sleep 120
# Wait 2 minutes and try again.
done
touch /tmp/sshfsmonted
fi
|
folkien/scripts
|
mountsshfs.sh
|
Shell
|
gpl-2.0
| 466 |
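One way to run such a script at startup is a cron @reboot entry; a sketch (the script's install path is an assumption):

# crontab -e, as the user owning the SSHFS mounts
@reboot /home/spasz/scripts/mountsshfs.sh >> /tmp/mountsshfs.log 2>&1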
#!/bin/sh
ssh -p 22 [email protected] -D 127.0.0.1:7071 # set up the ssh tunnel; do not close this terminal or exit the ssh session
|
averainy/averainy
|
shell/ssh_socks.sh
|
Shell
|
gpl-2.0
| 128 |
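Once the tunnel is up, point clients at the local SOCKS endpoint; for example with curl:

curl --socks5 127.0.0.1:7071 https://example.com   # routed through the ssh -D tunnel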
#!/bin/bash
# MSM8X26 L release kernel build script v0.3
# [email protected]
# [email protected] <Customized>
BUILD_TOP_DIR=..
BUILD_KERNEL_DIR=$(pwd)
BUILD_CROSS_COMPILE=$(pwd)/arm-linux-androideabi-4.9/bin/arm-linux-androideabi-
BUILD_JOB_NUMBER=`grep processor /proc/cpuinfo|wc -l`
KERNEL_DEFCONFIG=msm8226-sec_defconfig
SELINUX_DEFCONFIG=selinux_defconfig
shift $((OPTIND-1))
BUILD_COMMAND=$1
MODEL=${BUILD_COMMAND%%_*}
TEMP=${BUILD_COMMAND#*_}
REGION=${TEMP%%_*}
CARRIER=${TEMP##*_}
echo "######### PARAMETERS ##########"
echo "MODEL = $MODEL"
echo "TEMP = $TEMP"
echo "REGION = $REGION"
echo "CARRIER = $CARRIER"
echo "######### END #########"
if [[ "$BUILD_COMMAND" == "ms01lte"* ]]; then # MS01_LTE
VARIANT=${MODEL}_${CARRIER}
DTS_NAMES=msm8926-sec-ms01lteeur-r
elif [[ "$BUILD_COMMAND" == "ms013g"* ]]; then # MS01_3g
VARIANT=${MODEL}_${CARRIER}
DTS_NAMES=msm8226-sec-ms013geur-r
elif [[ "$BUILD_COMMAND" == "s3ve3g"* ]]; then # S3ve_3g
VARIANT=${MODEL}_${CARRIER}
DTS_NAMES=msm8226-sec-s3ve3geur-r
elif [[ "$BUILD_COMMAND" == "milletlte_vzw"* ]]; then
VARIANT=${MODEL}_${CARRIER}
DTS_NAMES=msm8926-sec-milletltevzw-r
#DTS_NAMES=msm8926-sec-milletlte_tmo-r
elif [[ "$BUILD_COMMAND" == "NewModel"* ]]; then # New Model
VARIANT=NewModel${CARRIER}
DTS_NAMES=NewModel-DTS-NAME
else
DTS_NAMES=
fi
PROJECT_NAME=${VARIANT}
if [ "$MODEL" == "ms01lte" ]; then
VARIANT_DEFCONFIG=msm8926-sec_${MODEL}_${CARRIER}_defconfig
elif [ "$MODEL" == "milletlte" ]; then
VARIANT_DEFCONFIG=msm8926-sec_${MODEL}_${CARRIER}_defconfig
else
VARIANT_DEFCONFIG=msm8226-sec_${MODEL}_${CARRIER}_defconfig
fi
case $1 in
clean)
echo "Clean..."
#make -C $BUILD_KERNEL_DIR clean
#make -C $BUILD_KERNEL_DIR distclean
BUILD_KERNEL_OUT_DIR=$BUILD_TOP_DIR/okernel/*$2
echo "remove kernel out directory $BUILD_KERNEL_OUT_DIR"
rm $BUILD_KERNEL_OUT_DIR -rf
exit 1
;;
*)
BUILD_KERNEL_OUT_DIR=$BUILD_TOP_DIR/okernel/$BUILD_COMMAND
PRODUCT_OUT=$BUILD_TOP_DIR/okernel/$BUILD_COMMAND
mkdir -p $BUILD_KERNEL_OUT_DIR
;;
esac
KERNEL_ZIMG=$BUILD_KERNEL_OUT_DIR/arch/arm/boot/zImage
DTC=$BUILD_KERNEL_OUT_DIR/scripts/dtc/dtc
FUNC_APPEND_DTB()
{
if ! [ -d $BUILD_KERNEL_OUT_DIR/arch/arm/boot ] ; then
echo "error no directory : $BUILD_KERNEL_OUT_DIR/arch/arm/boot"
exit -1
else
echo "rm files in : $BUILD_KERNEL_OUT_DIR/arch/arm/boot/*-zImage"
rm $BUILD_KERNEL_OUT_DIR/arch/arm/boot/*-zImage
echo "rm files in : $BUILD_KERNEL_OUT_DIR/arch/arm/boot/*.dtb"
rm $BUILD_KERNEL_OUT_DIR/arch/arm/boot/*.dtb
fi
for DTS_FILE in `ls ${BUILD_KERNEL_DIR}/arch/arm/boot/dts/msm8226/${DTS_NAMES}*.dts`
do
DTB_FILE=${DTS_FILE%.dts}.dtb
DTB_FILE=$BUILD_KERNEL_OUT_DIR/arch/arm/boot/${DTB_FILE##*/}
ZIMG_FILE=${DTB_FILE%.dtb}-zImage
echo ""
echo "dts : $DTS_FILE"
echo "dtb : $DTB_FILE"
echo "out : $ZIMG_FILE"
echo ""
$DTC -p 1024 -O dtb -o $DTB_FILE $DTS_FILE
cat $KERNEL_ZIMG $DTB_FILE > $ZIMG_FILE
done
}
INSTALLED_DTIMAGE_TARGET=${BUILD_KERNEL_OUT_DIR}/dt.img
DTBTOOL=$(pwd)/dtbtool/
FUNC_BUILD_DTIMAGE_TARGET()
{
echo ""
echo "================================="
echo "START : FUNC_BUILD_DTIMAGE_TARGET"
echo "================================="
echo ""
echo "DT image target : $INSTALLED_DTIMAGE_TARGET"
if ! [ -e $DTBTOOL ] ; then
if ! [ -d $BUILD_TOP_DIR/out/host/linux-x86/bin ] ; then
mkdir -p $BUILD_TOP_DIR/out/host/linux-x86/bin
fi
cp $(pwd)/dtbtool/ $DTBTOOL
fi
BOARD_KERNEL_PAGESIZE=2048
echo "$DTBTOOL -o $INSTALLED_DTIMAGE_TARGET -s $BOARD_KERNEL_PAGESIZE \
-p $BUILD_KERNEL_OUT_DIR/scripts/dtc/ $BUILD_KERNEL_OUT_DIR/arch/arm/boot/"
$DTBTOOL -o $INSTALLED_DTIMAGE_TARGET -s $BOARD_KERNEL_PAGESIZE \
-p $BUILD_KERNEL_OUT_DIR/scripts/dtc/ $BUILD_KERNEL_OUT_DIR/arch/arm/boot/
chmod a+r $INSTALLED_DTIMAGE_TARGET
echo ""
echo "================================="
echo "END : FUNC_BUILD_DTIMAGE_TARGET"
echo "================================="
echo ""
}
FUNC_BUILD_KERNEL()
{
echo ""
echo "=============================================="
echo "START : FUNC_BUILD_KERNEL"
echo "=============================================="
echo ""
echo "build project="$PROJECT_NAME""
echo "build common config="$KERNEL_DEFCONFIG ""
echo "build variant config="$VARIANT_DEFCONFIG ""
echo "build secure option="$SECURE_OPTION ""
echo "build SEANDROID option="$SEANDROID_OPTION ""
if [ "$BUILD_COMMAND" == "" ]; then
SECFUNC_PRINT_HELP;
exit -1;
fi
if [ "$TEMP" == "$MODEL" ]; then
KERNEL_DEFCONFIG=msm8226-sec_${MODEL}_defconfig
make -C $BUILD_KERNEL_DIR O=$BUILD_KERNEL_OUT_DIR -j$BUILD_JOB_NUMBER ARCH=arm \
CROSS_COMPILE=$BUILD_CROSS_COMPILE \
$KERNEL_DEFCONFIG \
DEBUG_DEFCONFIG=$DEBUG_DEFCONFIG SELINUX_DEFCONFIG=$SELINUX_DEFCONFIG \
SELINUX_LOG_DEFCONFIG=$SELINUX_LOG_DEFCONFIG || exit -1
else
make -C $BUILD_KERNEL_DIR O=$BUILD_KERNEL_OUT_DIR -j$BUILD_JOB_NUMBER ARCH=arm \
CROSS_COMPILE=$BUILD_CROSS_COMPILE \
$KERNEL_DEFCONFIG VARIANT_DEFCONFIG=$VARIANT_DEFCONFIG \
DEBUG_DEFCONFIG=$DEBUG_DEFCONFIG SELINUX_DEFCONFIG=$SELINUX_DEFCONFIG \
SELINUX_LOG_DEFCONFIG=$SELINUX_LOG_DEFCONFIG || exit -1
fi
make -C $BUILD_KERNEL_DIR O=$BUILD_KERNEL_OUT_DIR -j$BUILD_JOB_NUMBER ARCH=arm \
CROSS_COMPILE=$BUILD_CROSS_COMPILE || exit -1
FUNC_APPEND_DTB
FUNC_BUILD_DTIMAGE_TARGET
echo ""
echo "================================="
echo "END : FUNC_BUILD_KERNEL"
echo "================================="
echo ""
}
FUNC_EXT_MODULES_TARGET()
{
echo ""
echo "===================================="
echo " START : FUNC_EXT_MODULES_TARGET"
echo "===================================="
echo ""
OUT_MODU=$BUILD_TOP_DIR/mkernel/$BUILD_COMMAND
rm -rf $OUT_MODU
mkdir -p $OUT_MODU
find $BUILD_KERNEL_OUT_DIR -name "*.ko" -exec cp -fv {} $OUT_MODU \;
echo ""
echo "===================================="
echo " END : FUNC_EXT_MODULES_TARGET"
echo "===================================="
echo ""
}
SECFUNC_PRINT_HELP()
{
echo -e '\E[33m'
echo "Help"
echo "$0 \$1 \$2"
echo " \$1 : "
echo " ms013g"
echo " s3ve3g"
echo " s3ve3g_eur"
echo " ms013g_eur"
echo " ms01lte_eur"
echo " milletlte_vzw"
echo " \$2 : <only when \$1 is clean>"
echo " ms013g"
echo " s3ve3g"
echo " s3ve3g_eur"
echo " ms013g_eur"
echo " ms01lte_eur"
echo " milletlte_vzw"
echo -e '\E[0m'
}
# MAIN FUNCTION
rm -rf ./build.log
(
START_TIME=`date +%s`
FUNC_BUILD_KERNEL
FUNC_EXT_MODULES_TARGET
cp -vf $KERNEL_ZIMG $BUILD_KERNEL_OUT_DIR
END_TIME=`date +%s`
let "ELAPSED_TIME=$END_TIME-$START_TIME"
echo "Total compile time is $ELAPSED_TIME seconds"
) 2>&1 | tee -a ./build.log
|
aditheking/G7102_MM_SWA_Opensource
|
build_msm8x26.sh
|
Shell
|
gpl-2.0
| 6,708 |
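Typical invocations, following the script's own help text:

./build_msm8x26.sh ms013g_eur         # build kernel, modules and dt.img for ms013g (EUR)
./build_msm8x26.sh clean ms013g_eur   # remove the matching out directory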
#!/bin/sh
NAME=bfdada.cov
lcov --quiet --base-directory . --directory . -c -o $NAME
lcov --quiet --remove $NAME "/usr*" -o $NAME
lcov --quiet --remove $NAME "/build*" -o $NAME
lcov --quiet --remove $NAME "/opt*" -o $NAME
lcov --quiet --remove $NAME "*/adainclude*" -o $NAME
lcov --quiet --remove $NAME "*/regtests*" -o $NAME
lcov --quiet --remove $NAME "*/b__*" -o $NAME
lcov --quiet --remove $NAME "*/testutil*" -o $NAME
rm -rf cover
genhtml --quiet --ignore-errors source -o ./cover -t "test coverage" --num-spaces 4 $NAME
|
stcarrez/ada-bfd
|
coverage.sh
|
Shell
|
gpl-2.0
| 527 |
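lcov only has data to aggregate if the tests were built with gcov instrumentation and run first; a sketch (the make flags and targets are assumptions for this Ada project):

make COVERAGE=yes && make test   # hypothetical flags: build instrumented, run the tests
./coverage.sh
xdg-open cover/index.html        # browse the generated HTML report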
#!/bin/sh
##
## Visopsys
## Copyright (C) 1998-2015 J. Andrew McLaughlin
##
## image-usb.sh
##
# Installs the Visopsys system into a zipped USB image file
BLANKUSB=./blankusb.gz
INSTSCRIPT=./install.sh
MOUNTDIR=./tmp_mnt
ZIPLOG=./zip.log
echo ""
echo "Making Visopsys USB IMAGE file"
while [ "$1" != "" ] ; do
# Are we doing a release version? If the argument is "-r" then we use
# the release number in the destination directory name. Otherwise, we
# assume an interim package and use the date instead
if [ "$1" = "-r" ] ; then
# What is the current release version?
RELEASE=`./release.sh`
echo " - doing RELEASE version $RELEASE"
fi
shift
done
# Check for things we need to be in the current directory
for FILE in $BLANKUSB $INSTSCRIPT ; do
if [ ! -f $FILE ] ; then
echo ""
echo "Required file $FILE not found. Terminating"
echo ""
exit 1
fi
done
if [ "$RELEASE" = "" ] ; then
# What is the date?
RELEASE=`date +%Y-%m-%d`
echo " - doing INTERIM version $RELEASE (use -r flag for RELEASES)"
fi
NAME=visopsys-"$RELEASE"
IMAGEFILE="$NAME"-usb.img
ZIPFILE="$NAME"-usb-img.zip
rm -f $IMAGEFILE
cp $BLANKUSB "$IMAGEFILE".gz
gunzip "$IMAGEFILE".gz
# Determine the starting offset of the first partition in the image.
# Assumptions:
# 1. We are installing in the first partition
# 2. The first partition has been made active
# 3. The image was created using 512-byte sectors.
STARTSEC=$(/sbin/fdisk -lu -b512 $IMAGEFILE 2> /dev/null | \
grep ^"$IMAGEFILE"1 | tr -s ' ' | cut -d' ' -f3)
STARTOFF=$(($STARTSEC * 512))
# Connect the virtual partition to a loop device
LOOPDEV=$(/sbin/losetup -f)
/sbin/losetup -o $STARTOFF $LOOPDEV "$IMAGEFILE"
# Run the installation script
$INSTSCRIPT $LOOPDEV
STATUS=$?
# Disconnect the loop device
/sbin/losetup -d $LOOPDEV
if [ $STATUS -ne 0 ] ; then
echo ""
echo "Install failure. Terminating"
echo ""
exit 1
fi
echo -n "Archiving... "
echo "Visopsys $RELEASE USB Image Release" > /tmp/comment
echo "Copyright (C) 1998-2015 J. Andrew McLaughlin" >> /tmp/comment
rm -f $ZIPFILE
zip -9 -z -r $ZIPFILE $IMAGEFILE < /tmp/comment > $ZIPLOG 2>&1
if [ $? -ne 0 ] ; then
echo ""
echo -n "Not able to create zip file $ZIPFILE. "
echo "See $ZIPLOG. Terminating."
echo ""
exit 1
fi
rm -f /tmp/comment $IMAGEFILE $ZIPLOG
echo Done
echo ""
echo "File is: $ZIPFILE"
echo ""
exit 0
|
buddywithgol/visopsys
|
utils/image-usb.sh
|
Shell
|
gpl-2.0
| 2,361 |
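To put the result on an actual stick, unzip and raw-copy it; a sketch (the file name is date-stamped by the script, and /dev/sdX must be replaced with the real device, whose contents will be overwritten):

unzip visopsys-2015-06-01-usb-img.zip             # hypothetical release date
sudo dd if=visopsys-2015-06-01-usb.img of=/dev/sdX bs=4M conv=fsync
sync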
#!/bin/sh
#
# Copyright (c) 2007 Shawn Pearce
#
test_description='test git fast-import utility'
. ./test-lib.sh
. "$TEST_DIRECTORY"/diff-lib.sh ;# test-lib chdir's into trash
# Print $1 bytes from stdin to stdout.
#
# This could be written as "head -c $1", but IRIX "head" does not
# support the -c option.
head_c () {
perl -e '
my $len = $ARGV[1];
while ($len > 0) {
my $s;
my $nread = sysread(STDIN, $s, $len);
die "cannot read: $!" unless defined($nread);
print $s;
$len -= $nread;
}
' - "$1"
}
file2_data='file2
second line of EOF'
file3_data='EOF
in 3rd file
END'
file4_data=abcd
file4_len=4
file5_data='an inline file.
we should see it later.'
file6_data='#!/bin/sh
echo "$@"'
>empty
test_expect_success 'setup: have pipes?' '
rm -f frob &&
if mkfifo frob
then
test_set_prereq PIPE
fi
'
###
### series A
###
test_tick
test_expect_success 'empty stream succeeds' '
git fast-import </dev/null
'
cat >input <<INPUT_END
blob
mark :2
data <<EOF
$file2_data
EOF
blob
mark :3
data <<END
$file3_data
END
blob
mark :4
data $file4_len
$file4_data
commit refs/heads/master
mark :5
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
initial
COMMIT
M 644 :2 file2
M 644 :3 file3
M 755 :4 file4
tag series-A
from :5
data <<EOF
An annotated tag without a tagger
EOF
INPUT_END
test_expect_success \
'A: create pack from stdin' \
'git fast-import --export-marks=marks.out <input &&
git whatchanged master'
test_expect_success \
'A: verify pack' \
'for p in .git/objects/pack/*.pack;do git verify-pack $p||exit;done'
cat >expect <<EOF
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
initial
EOF
test_expect_success \
'A: verify commit' \
'git cat-file commit master | sed 1d >actual &&
test_cmp expect actual'
cat >expect <<EOF
100644 blob file2
100644 blob file3
100755 blob file4
EOF
test_expect_success \
'A: verify tree' \
'git cat-file -p master^{tree} | sed "s/ [0-9a-f]* / /" >actual &&
test_cmp expect actual'
echo "$file2_data" >expect
test_expect_success \
'A: verify file2' \
'git cat-file blob master:file2 >actual && test_cmp expect actual'
echo "$file3_data" >expect
test_expect_success \
'A: verify file3' \
'git cat-file blob master:file3 >actual && test_cmp expect actual'
printf "$file4_data" >expect
test_expect_success \
'A: verify file4' \
'git cat-file blob master:file4 >actual && test_cmp expect actual'
cat >expect <<EOF
object $(git rev-parse refs/heads/master)
type commit
tag series-A
An annotated tag without a tagger
EOF
test_expect_success 'A: verify tag/series-A' '
git cat-file tag tags/series-A >actual &&
test_cmp expect actual
'
cat >expect <<EOF
:2 `git rev-parse --verify master:file2`
:3 `git rev-parse --verify master:file3`
:4 `git rev-parse --verify master:file4`
:5 `git rev-parse --verify master^0`
EOF
test_expect_success \
'A: verify marks output' \
'test_cmp expect marks.out'
test_expect_success \
'A: verify marks import' \
'git fast-import \
--import-marks=marks.out \
--export-marks=marks.new \
</dev/null &&
test_cmp expect marks.new'
test_tick
cat >input <<INPUT_END
commit refs/heads/verify--import-marks
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
recreate from :5
COMMIT
from :5
M 755 :2 copy-of-file2
INPUT_END
test_expect_success \
'A: verify marks import does not crash' \
'git fast-import --import-marks=marks.out <input &&
git whatchanged verify--import-marks'
test_expect_success \
'A: verify pack' \
'for p in .git/objects/pack/*.pack;do git verify-pack $p||exit;done'
cat >expect <<EOF
:000000 100755 0000000000000000000000000000000000000000 7123f7f44e39be127c5eb701e5968176ee9d78b1 A copy-of-file2
EOF
git diff-tree -M -r master verify--import-marks >actual
test_expect_success \
'A: verify diff' \
'compare_diff_raw expect actual &&
test `git rev-parse --verify master:file2` \
= `git rev-parse --verify verify--import-marks:copy-of-file2`'
test_tick
mt=$(git hash-object --stdin < /dev/null)
: >input.blob
: >marks.exp
: >tree.exp
cat >input.commit <<EOF
commit refs/heads/verify--dump-marks
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
test the sparse array dumping routines with exponentially growing marks
COMMIT
EOF
i=0
l=4
m=6
n=7
while test "$i" -lt 27; do
cat >>input.blob <<EOF
blob
mark :$l
data 0
blob
mark :$m
data 0
blob
mark :$n
data 0
EOF
echo "M 100644 :$l l$i" >>input.commit
echo "M 100644 :$m m$i" >>input.commit
echo "M 100644 :$n n$i" >>input.commit
echo ":$l $mt" >>marks.exp
echo ":$m $mt" >>marks.exp
echo ":$n $mt" >>marks.exp
printf "100644 blob $mt\tl$i\n" >>tree.exp
printf "100644 blob $mt\tm$i\n" >>tree.exp
printf "100644 blob $mt\tn$i\n" >>tree.exp
l=$(($l + $l))
m=$(($m + $m))
n=$(($l + $n))
i=$((1 + $i))
done
sort tree.exp > tree.exp_s
test_expect_success 'A: export marks with large values' '
cat input.blob input.commit | git fast-import --export-marks=marks.large &&
git ls-tree refs/heads/verify--dump-marks >tree.out &&
test_cmp tree.exp_s tree.out &&
test_cmp marks.exp marks.large'
###
### series B
###
test_tick
cat >input <<INPUT_END
commit refs/heads/branch
mark :1
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
corrupt
COMMIT
from refs/heads/master
M 755 0000000000000000000000000000000000000001 zero1
INPUT_END
test_expect_success 'B: fail on invalid blob sha1' '
test_must_fail git fast-import <input
'
rm -f .git/objects/pack_* .git/objects/index_*
cat >input <<INPUT_END
commit .badbranchname
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
corrupt
COMMIT
from refs/heads/master
INPUT_END
test_expect_success 'B: fail on invalid branch name ".badbranchname"' '
test_must_fail git fast-import <input
'
rm -f .git/objects/pack_* .git/objects/index_*
cat >input <<INPUT_END
commit bad[branch]name
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
corrupt
COMMIT
from refs/heads/master
INPUT_END
test_expect_success 'B: fail on invalid branch name "bad[branch]name"' '
test_must_fail git fast-import <input
'
rm -f .git/objects/pack_* .git/objects/index_*
cat >input <<INPUT_END
commit TEMP_TAG
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
tag base
COMMIT
from refs/heads/master
INPUT_END
test_expect_success \
'B: accept branch name "TEMP_TAG"' \
'git fast-import <input &&
test -f .git/TEMP_TAG &&
test `git rev-parse master` = `git rev-parse TEMP_TAG^`'
rm -f .git/TEMP_TAG
###
### series C
###
newf=`echo hi newf | git hash-object -w --stdin`
oldf=`git rev-parse --verify master:file2`
test_tick
cat >input <<INPUT_END
commit refs/heads/branch
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
second
COMMIT
from refs/heads/master
M 644 $oldf file2/oldf
M 755 $newf file2/newf
D file3
INPUT_END
test_expect_success \
'C: incremental import create pack from stdin' \
'git fast-import <input &&
git whatchanged branch'
test_expect_success \
'C: verify pack' \
'for p in .git/objects/pack/*.pack;do git verify-pack $p||exit;done'
test_expect_success \
'C: validate reuse existing blob' \
'test $newf = `git rev-parse --verify branch:file2/newf` &&
test $oldf = `git rev-parse --verify branch:file2/oldf`'
cat >expect <<EOF
parent `git rev-parse --verify master^0`
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
second
EOF
test_expect_success \
'C: verify commit' \
'git cat-file commit branch | sed 1d >actual &&
test_cmp expect actual'
cat >expect <<EOF
:000000 100755 0000000000000000000000000000000000000000 f1fb5da718392694d0076d677d6d0e364c79b0bc A file2/newf
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 R100 file2 file2/oldf
:100644 000000 0d92e9f3374ae2947c23aa477cbc68ce598135f1 0000000000000000000000000000000000000000 D file3
EOF
git diff-tree -M -r master branch >actual
test_expect_success \
'C: validate rename result' \
'compare_diff_raw expect actual'
###
### series D
###
test_tick
cat >input <<INPUT_END
commit refs/heads/branch
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
third
COMMIT
from refs/heads/branch^0
M 644 inline newdir/interesting
data <<EOF
$file5_data
EOF
M 755 inline newdir/exec.sh
data <<EOF
$file6_data
EOF
INPUT_END
test_expect_success \
'D: inline data in commit' \
'git fast-import <input &&
git whatchanged branch'
test_expect_success \
'D: verify pack' \
'for p in .git/objects/pack/*.pack;do git verify-pack $p||exit;done'
cat >expect <<EOF
:000000 100755 0000000000000000000000000000000000000000 35a59026a33beac1569b1c7f66f3090ce9c09afc A newdir/exec.sh
:000000 100644 0000000000000000000000000000000000000000 046d0371e9220107917db0d0e030628de8a1de9b A newdir/interesting
EOF
git diff-tree -M -r branch^ branch >actual
test_expect_success \
'D: validate new files added' \
'compare_diff_raw expect actual'
echo "$file5_data" >expect
test_expect_success \
'D: verify file5' \
'git cat-file blob branch:newdir/interesting >actual &&
test_cmp expect actual'
echo "$file6_data" >expect
test_expect_success \
'D: verify file6' \
'git cat-file blob branch:newdir/exec.sh >actual &&
test_cmp expect actual'
###
### series E
###
cat >input <<INPUT_END
commit refs/heads/branch
author $GIT_AUTHOR_NAME <$GIT_AUTHOR_EMAIL> Tue Feb 6 11:22:18 2007 -0500
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> Tue Feb 6 12:35:02 2007 -0500
data <<COMMIT
RFC 2822 type date
COMMIT
from refs/heads/branch^0
INPUT_END
test_expect_success 'E: rfc2822 date, --date-format=raw' '
test_must_fail git fast-import --date-format=raw <input
'
test_expect_success \
'E: rfc2822 date, --date-format=rfc2822' \
'git fast-import --date-format=rfc2822 <input'
test_expect_success \
'E: verify pack' \
'for p in .git/objects/pack/*.pack;do git verify-pack $p||exit;done'
cat >expect <<EOF
author $GIT_AUTHOR_NAME <$GIT_AUTHOR_EMAIL> 1170778938 -0500
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1170783302 -0500
RFC 2822 type date
EOF
test_expect_success \
'E: verify commit' \
'git cat-file commit branch | sed 1,2d >actual &&
test_cmp expect actual'
###
### series F
###
old_branch=`git rev-parse --verify branch^0`
test_tick
cat >input <<INPUT_END
commit refs/heads/branch
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
losing things already?
COMMIT
from refs/heads/branch~1
reset refs/heads/other
from refs/heads/branch
INPUT_END
test_expect_success \
'F: non-fast-forward update skips' \
'if git fast-import <input
then
echo BAD gfi did not fail
return 1
else
if test $old_branch = `git rev-parse --verify branch^0`
then
: branch unaffected and failure returned
return 0
else
echo BAD gfi changed branch $old_branch
return 1
fi
fi
'
test_expect_success \
'F: verify pack' \
'for p in .git/objects/pack/*.pack;do git verify-pack $p||exit;done'
cat >expect <<EOF
tree `git rev-parse branch~1^{tree}`
parent `git rev-parse branch~1`
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
losing things already?
EOF
test_expect_success \
'F: verify other commit' \
'git cat-file commit other >actual &&
test_cmp expect actual'
###
### series G
###
old_branch=`git rev-parse --verify branch^0`
test_tick
cat >input <<INPUT_END
commit refs/heads/branch
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
losing things already?
COMMIT
from refs/heads/branch~1
INPUT_END
test_expect_success \
'G: non-fast-forward update forced' \
'git fast-import --force <input'
test_expect_success \
'G: verify pack' \
'for p in .git/objects/pack/*.pack;do git verify-pack $p||exit;done'
test_expect_success \
'G: branch changed, but logged' \
'test $old_branch != `git rev-parse --verify branch^0` &&
test $old_branch = `git rev-parse --verify branch@{1}`'
###
### series H
###
test_tick
cat >input <<INPUT_END
commit refs/heads/H
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
third
COMMIT
from refs/heads/branch^0
M 644 inline i-will-die
data <<EOF
this file will never exist.
EOF
deleteall
M 644 inline h/e/l/lo
data <<EOF
$file5_data
EOF
INPUT_END
test_expect_success \
'H: deleteall, add 1' \
'git fast-import <input &&
git whatchanged H'
test_expect_success \
'H: verify pack' \
'for p in .git/objects/pack/*.pack;do git verify-pack $p||exit;done'
cat >expect <<EOF
:100755 000000 f1fb5da718392694d0076d677d6d0e364c79b0bc 0000000000000000000000000000000000000000 D file2/newf
:100644 000000 7123f7f44e39be127c5eb701e5968176ee9d78b1 0000000000000000000000000000000000000000 D file2/oldf
:100755 000000 85df50785d62d3b05ab03d9cbf7e4a0b49449730 0000000000000000000000000000000000000000 D file4
:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 R100 newdir/interesting h/e/l/lo
:100755 000000 e74b7d465e52746be2b4bae983670711e6e66657 0000000000000000000000000000000000000000 D newdir/exec.sh
EOF
git diff-tree -M -r H^ H >actual
test_expect_success \
'H: validate old files removed, new files added' \
'compare_diff_raw expect actual'
echo "$file5_data" >expect
test_expect_success \
'H: verify file' \
'git cat-file blob H:h/e/l/lo >actual &&
test_cmp expect actual'
###
### series I
###
cat >input <<INPUT_END
commit refs/heads/export-boundary
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
we have a border. it's only 40 characters wide.
COMMIT
from refs/heads/branch
INPUT_END
test_expect_success \
'I: export-pack-edges' \
'git fast-import --export-pack-edges=edges.list <input'
cat >expect <<EOF
.git/objects/pack/pack-.pack: `git rev-parse --verify export-boundary`
EOF
test_expect_success \
'I: verify edge list' \
'sed -e s/pack-.*pack/pack-.pack/ edges.list >actual &&
test_cmp expect actual'
###
### series J
###
cat >input <<INPUT_END
commit refs/heads/J
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
create J
COMMIT
from refs/heads/branch
reset refs/heads/J
commit refs/heads/J
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
initialize J
COMMIT
INPUT_END
test_expect_success \
'J: reset existing branch creates empty commit' \
'git fast-import <input'
test_expect_success \
'J: branch has 1 commit, empty tree' \
'test 1 = `git rev-list J | wc -l` &&
test 0 = `git ls-tree J | wc -l`'
###
### series K
###
cat >input <<INPUT_END
commit refs/heads/K
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
create K
COMMIT
from refs/heads/branch
commit refs/heads/K
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
redo K
COMMIT
from refs/heads/branch^1
INPUT_END
test_expect_success \
'K: reinit branch with from' \
'git fast-import <input'
test_expect_success \
'K: verify K^1 = branch^1' \
'test `git rev-parse --verify branch^1` \
= `git rev-parse --verify K^1`'
###
### series L
###
cat >input <<INPUT_END
blob
mark :1
data <<EOF
some data
EOF
blob
mark :2
data <<EOF
other data
EOF
commit refs/heads/L
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
create L
COMMIT
M 644 :1 b.
M 644 :1 b/other
M 644 :1 ba
commit refs/heads/L
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
update L
COMMIT
M 644 :2 b.
M 644 :2 b/other
M 644 :2 ba
INPUT_END
cat >expect <<EXPECT_END
:100644 100644 4268632... 55d3a52... M b.
:040000 040000 0ae5cac... 443c768... M b
:100644 100644 4268632... 55d3a52... M ba
EXPECT_END
test_expect_success \
'L: verify internal tree sorting' \
'git fast-import <input &&
git diff-tree --abbrev --raw L^ L >output &&
test_cmp expect output'
###
### series M
###
test_tick
cat >input <<INPUT_END
commit refs/heads/M1
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
file rename
COMMIT
from refs/heads/branch^0
R file2/newf file2/n.e.w.f
INPUT_END
cat >expect <<EOF
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc R100 file2/newf file2/n.e.w.f
EOF
test_expect_success \
'M: rename file in same subdirectory' \
'git fast-import <input &&
git diff-tree -M -r M1^ M1 >actual &&
compare_diff_raw expect actual'
cat >input <<INPUT_END
commit refs/heads/M2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
file rename
COMMIT
from refs/heads/branch^0
R file2/newf i/am/new/to/you
INPUT_END
cat >expect <<EOF
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc R100 file2/newf i/am/new/to/you
EOF
test_expect_success \
'M: rename file to new subdirectory' \
'git fast-import <input &&
git diff-tree -M -r M2^ M2 >actual &&
compare_diff_raw expect actual'
cat >input <<INPUT_END
commit refs/heads/M3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
file rename
COMMIT
from refs/heads/M2^0
R i other/sub
INPUT_END
cat >expect <<EOF
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc R100 i/am/new/to/you other/sub/am/new/to/you
EOF
test_expect_success \
'M: rename subdirectory to new subdirectory' \
'git fast-import <input &&
git diff-tree -M -r M3^ M3 >actual &&
compare_diff_raw expect actual'
###
### series N
###
test_tick
cat >input <<INPUT_END
commit refs/heads/N1
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
file copy
COMMIT
from refs/heads/branch^0
C file2/newf file2/n.e.w.f
INPUT_END
cat >expect <<EOF
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100 file2/newf file2/n.e.w.f
EOF
test_expect_success \
'N: copy file in same subdirectory' \
'git fast-import <input &&
git diff-tree -C --find-copies-harder -r N1^ N1 >actual &&
compare_diff_raw expect actual'
cat >input <<INPUT_END
commit refs/heads/N2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
clean directory copy
COMMIT
from refs/heads/branch^0
C file2 file3
commit refs/heads/N2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
modify directory copy
COMMIT
M 644 inline file3/file5
data <<EOF
$file5_data
EOF
INPUT_END
cat >expect <<EOF
:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 C100 newdir/interesting file3/file5
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100 file2/newf file3/newf
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100 file2/oldf file3/oldf
EOF
test_expect_success \
'N: copy then modify subdirectory' \
'git fast-import <input &&
git diff-tree -C --find-copies-harder -r N2^^ N2 >actual &&
compare_diff_raw expect actual'
cat >input <<INPUT_END
commit refs/heads/N3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
dirty directory copy
COMMIT
from refs/heads/branch^0
M 644 inline file2/file5
data <<EOF
$file5_data
EOF
C file2 file3
D file2/file5
INPUT_END
test_expect_success \
'N: copy dirty subdirectory' \
'git fast-import <input &&
test `git rev-parse N2^{tree}` = `git rev-parse N3^{tree}`'
test_expect_success \
'N: copy directory by id' \
'cat >expect <<-\EOF &&
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100 file2/newf file3/newf
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100 file2/oldf file3/oldf
EOF
subdir=$(git rev-parse refs/heads/branch^0:file2) &&
cat >input <<-INPUT_END &&
commit refs/heads/N4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy by tree hash
COMMIT
from refs/heads/branch^0
M 040000 $subdir file3
INPUT_END
git fast-import <input &&
git diff-tree -C --find-copies-harder -r N4^ N4 >actual &&
compare_diff_raw expect actual'
test_expect_success PIPE 'N: read and copy directory' '
cat >expect <<-\EOF
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100 file2/newf file3/newf
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100 file2/oldf file3/oldf
EOF
git update-ref -d refs/heads/N4 &&
rm -f backflow &&
mkfifo backflow &&
(
exec <backflow &&
cat <<-EOF &&
commit refs/heads/N4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy by tree hash, part 2
COMMIT
from refs/heads/branch^0
ls "file2"
EOF
read mode type tree filename &&
echo "M 040000 $tree file3"
) |
git fast-import --cat-blob-fd=3 3>backflow &&
git diff-tree -C --find-copies-harder -r N4^ N4 >actual &&
compare_diff_raw expect actual
'
test_expect_success PIPE 'N: empty directory reads as missing' '
cat <<-\EOF >expect &&
OBJNAME
:000000 100644 OBJNAME OBJNAME A unrelated
EOF
echo "missing src" >expect.response &&
git update-ref -d refs/heads/read-empty &&
rm -f backflow &&
mkfifo backflow &&
(
exec <backflow &&
cat <<-EOF &&
commit refs/heads/read-empty
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
read "empty" (missing) directory
COMMIT
M 100644 inline src/greeting
data <<BLOB
hello
BLOB
C src/greeting dst1/non-greeting
C src/greeting unrelated
# leave behind "empty" src directory
D src/greeting
ls "src"
EOF
read -r line &&
printf "%s\n" "$line" >response &&
cat <<-\EOF
D dst1
D dst2
EOF
) |
git fast-import --cat-blob-fd=3 3>backflow &&
test_cmp expect.response response &&
git rev-list read-empty |
git diff-tree -r --root --stdin |
sed "s/$_x40/OBJNAME/g" >actual &&
test_cmp expect actual
'
test_expect_success \
'N: copy root directory by tree hash' \
'cat >expect <<-\EOF &&
:100755 000000 f1fb5da718392694d0076d677d6d0e364c79b0bc 0000000000000000000000000000000000000000 D file3/newf
:100644 000000 7123f7f44e39be127c5eb701e5968176ee9d78b1 0000000000000000000000000000000000000000 D file3/oldf
EOF
root=$(git rev-parse refs/heads/branch^0^{tree}) &&
cat >input <<-INPUT_END &&
commit refs/heads/N6
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy root directory by tree hash
COMMIT
from refs/heads/branch^0
M 040000 $root ""
INPUT_END
git fast-import <input &&
git diff-tree -C --find-copies-harder -r N4 N6 >actual &&
compare_diff_raw expect actual'
test_expect_success \
'N: delete directory by copying' \
'cat >expect <<-\EOF &&
OBJID
:100644 000000 OBJID OBJID D foo/bar/qux
OBJID
:000000 100644 OBJID OBJID A foo/bar/baz
:000000 100644 OBJID OBJID A foo/bar/qux
EOF
empty_tree=$(git mktree </dev/null) &&
cat >input <<-INPUT_END &&
commit refs/heads/N-delete
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
collect data to be deleted
COMMIT
deleteall
M 100644 inline foo/bar/baz
data <<DATA_END
hello
DATA_END
C "foo/bar/baz" "foo/bar/qux"
C "foo/bar/baz" "foo/bar/quux/1"
C "foo/bar/baz" "foo/bar/quuux"
M 040000 $empty_tree foo/bar/quux
M 040000 $empty_tree foo/bar/quuux
commit refs/heads/N-delete
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
delete subdirectory
COMMIT
M 040000 $empty_tree foo/bar/qux
INPUT_END
git fast-import <input &&
git rev-list N-delete |
git diff-tree -r --stdin --root --always |
sed -e "s/$_x40/OBJID/g" >actual &&
test_cmp expect actual'
test_expect_success \
'N: modify copied tree' \
'cat >expect <<-\EOF &&
:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 C100 newdir/interesting file3/file5
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100 file2/newf file3/newf
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100 file2/oldf file3/oldf
EOF
subdir=$(git rev-parse refs/heads/branch^0:file2) &&
cat >input <<-INPUT_END &&
commit refs/heads/N5
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy by tree hash
COMMIT
from refs/heads/branch^0
M 040000 $subdir file3
commit refs/heads/N5
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
modify directory copy
COMMIT
M 644 inline file3/file5
data <<EOF
$file5_data
EOF
INPUT_END
git fast-import <input &&
git diff-tree -C --find-copies-harder -r N5^^ N5 >actual &&
compare_diff_raw expect actual'
test_expect_success \
'N: reject foo/ syntax' \
'subdir=$(git rev-parse refs/heads/branch^0:file2) &&
test_must_fail git fast-import <<-INPUT_END
commit refs/heads/N5B
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy with invalid syntax
COMMIT
from refs/heads/branch^0
M 040000 $subdir file3/
INPUT_END'
test_expect_success \
'N: copy to root by id and modify' \
'echo "hello, world" >expect.foo &&
echo hello >expect.bar &&
git fast-import <<-SETUP_END &&
commit refs/heads/N7
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
hello, tree
COMMIT
deleteall
M 644 inline foo/bar
data <<EOF
hello
EOF
SETUP_END
tree=$(git rev-parse --verify N7:) &&
git fast-import <<-INPUT_END &&
commit refs/heads/N8
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy to root by id and modify
COMMIT
M 040000 $tree ""
M 644 inline foo/foo
data <<EOF
hello, world
EOF
INPUT_END
git show N8:foo/foo >actual.foo &&
git show N8:foo/bar >actual.bar &&
test_cmp expect.foo actual.foo &&
test_cmp expect.bar actual.bar'
test_expect_success \
'N: extract subtree' \
'branch=$(git rev-parse --verify refs/heads/branch^{tree}) &&
cat >input <<-INPUT_END &&
commit refs/heads/N9
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
extract subtree branch:newdir
COMMIT
M 040000 $branch ""
C "newdir" ""
INPUT_END
git fast-import <input &&
git diff --exit-code branch:newdir N9'
test_expect_success \
'N: modify subtree, extract it, and modify again' \
'echo hello >expect.baz &&
echo hello, world >expect.qux &&
git fast-import <<-SETUP_END &&
commit refs/heads/N10
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
hello, tree
COMMIT
deleteall
M 644 inline foo/bar/baz
data <<EOF
hello
EOF
SETUP_END
tree=$(git rev-parse --verify N10:) &&
git fast-import <<-INPUT_END &&
commit refs/heads/N11
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy to root by id and modify
COMMIT
M 040000 $tree ""
M 100644 inline foo/bar/qux
data <<EOF
hello, world
EOF
R "foo" ""
C "bar/qux" "bar/quux"
INPUT_END
git show N11:bar/baz >actual.baz &&
git show N11:bar/qux >actual.qux &&
git show N11:bar/quux >actual.quux &&
test_cmp expect.baz actual.baz &&
test_cmp expect.qux actual.qux &&
test_cmp expect.qux actual.quux'
###
### series O
###
cat >input <<INPUT_END
#we will
commit refs/heads/O1
# -- ignore all of this text
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
# $GIT_COMMITTER_NAME has inserted here for his benefit.
data <<COMMIT
dirty directory copy
COMMIT
# don't forget the import blank line!
#
# yes, we started from our usual base of branch^0.
# i like branch^0.
from refs/heads/branch^0
# and we need to reuse file2/file5 from N3 above.
M 644 inline file2/file5
# otherwise the tree will be different
data <<EOF
$file5_data
EOF
# don't forget to copy file2 to file3
C file2 file3
#
# or to delete file5 from file2.
D file2/file5
# are we done yet?
INPUT_END
test_expect_success \
'O: comments are all skipped' \
'git fast-import <input &&
test `git rev-parse N3` = `git rev-parse O1`'
cat >input <<INPUT_END
commit refs/heads/O2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
dirty directory copy
COMMIT
from refs/heads/branch^0
M 644 inline file2/file5
data <<EOF
$file5_data
EOF
C file2 file3
D file2/file5
INPUT_END
test_expect_success \
'O: blank lines not necessary after data commands' \
'git fast-import <input &&
test `git rev-parse N3` = `git rev-parse O2`'
test_expect_success \
'O: repack before next test' \
'git repack -a -d'
cat >input <<INPUT_END
commit refs/heads/O3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zstring
COMMIT
commit refs/heads/O3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zof
COMMIT
checkpoint
commit refs/heads/O3
mark :5
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zempty
COMMIT
checkpoint
commit refs/heads/O3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zcommits
COMMIT
reset refs/tags/O3-2nd
from :5
reset refs/tags/O3-3rd
from :5
INPUT_END
cat >expect <<INPUT_END
string
of
empty
commits
INPUT_END
test_expect_success \
'O: blank lines not necessary after other commands' \
'git fast-import <input &&
test 8 = `find .git/objects/pack -type f | wc -l` &&
test `git rev-parse refs/tags/O3-2nd` = `git rev-parse O3^` &&
git log --reverse --pretty=oneline O3 | sed s/^.*z// >actual &&
test_cmp expect actual'
cat >input <<INPUT_END
commit refs/heads/O4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zstring
COMMIT
commit refs/heads/O4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zof
COMMIT
progress Two commits down, 2 to go!
commit refs/heads/O4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zempty
COMMIT
progress Three commits down, 1 to go!
commit refs/heads/O4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zcommits
COMMIT
progress I'm done!
INPUT_END
test_expect_success \
'O: progress outputs as requested by input' \
'git fast-import <input >actual &&
grep "progress " <input >expect &&
test_cmp expect actual'
###
### series P (gitlinks)
###
cat >input <<INPUT_END
blob
mark :1
data 10
test file
reset refs/heads/sub
commit refs/heads/sub
mark :2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 12
sub_initial
M 100644 :1 file
blob
mark :3
data <<DATAEND
[submodule "sub"]
path = sub
url = "`pwd`/sub"
DATAEND
commit refs/heads/subuse1
mark :4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 8
initial
from refs/heads/master
M 100644 :3 .gitmodules
M 160000 :2 sub
blob
mark :5
data 20
test file
more data
commit refs/heads/sub
mark :6
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 11
sub_second
from :2
M 100644 :5 file
commit refs/heads/subuse1
mark :7
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 7
second
from :4
M 160000 :6 sub
INPUT_END
test_expect_success \
'P: supermodule & submodule mix' \
'git fast-import <input &&
git checkout subuse1 &&
rm -rf sub && mkdir sub && (cd sub &&
git init &&
git fetch --update-head-ok .. refs/heads/sub:refs/heads/master &&
git checkout master) &&
git submodule init &&
git submodule update'
SUBLAST=$(git rev-parse --verify sub)
SUBPREV=$(git rev-parse --verify sub^)
cat >input <<INPUT_END
blob
mark :1
data <<DATAEND
[submodule "sub"]
path = sub
url = "`pwd`/sub"
DATAEND
commit refs/heads/subuse2
mark :2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 8
initial
from refs/heads/master
M 100644 :1 .gitmodules
M 160000 $SUBPREV sub
commit refs/heads/subuse2
mark :3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 7
second
from :2
M 160000 $SUBLAST sub
INPUT_END
test_expect_success \
'P: verbatim SHA gitlinks' \
'git branch -D sub &&
git gc && git prune &&
git fast-import <input &&
test $(git rev-parse --verify subuse2) = $(git rev-parse --verify subuse1)'
test_tick
cat >input <<INPUT_END
commit refs/heads/subuse3
mark :1
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
corrupt
COMMIT
from refs/heads/subuse2
M 160000 inline sub
data <<DATA
$SUBPREV
DATA
INPUT_END
test_expect_success 'P: fail on inline gitlink' '
test_must_fail git fast-import <input'
test_tick
cat >input <<INPUT_END
blob
mark :1
data <<DATA
$SUBPREV
DATA
commit refs/heads/subuse3
mark :2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
corrupt
COMMIT
from refs/heads/subuse2
M 160000 :1 sub
INPUT_END
test_expect_success 'P: fail on blob mark in gitlink' '
test_must_fail git fast-import <input'
###
### series Q (notes)
###
note1_data="The first note for the first commit"
note2_data="The first note for the second commit"
note3_data="The first note for the third commit"
note1b_data="The second note for the first commit"
note1c_data="The third note for the first commit"
note2b_data="The second note for the second commit"
test_tick
cat >input <<INPUT_END
blob
mark :2
data <<EOF
$file2_data
EOF
commit refs/heads/notes-test
mark :3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
first (:3)
COMMIT
M 644 :2 file2
blob
mark :4
data $file4_len
$file4_data
commit refs/heads/notes-test
mark :5
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
second (:5)
COMMIT
M 644 :4 file4
commit refs/heads/notes-test
mark :6
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
third (:6)
COMMIT
M 644 inline file5
data <<EOF
$file5_data
EOF
M 755 inline file6
data <<EOF
$file6_data
EOF
blob
mark :7
data <<EOF
$note1_data
EOF
blob
mark :8
data <<EOF
$note2_data
EOF
commit refs/notes/foobar
mark :9
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
notes (:9)
COMMIT
N :7 :3
N :8 :5
N inline :6
data <<EOF
$note3_data
EOF
commit refs/notes/foobar
mark :10
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
notes (:10)
COMMIT
N inline :3
data <<EOF
$note1b_data
EOF
commit refs/notes/foobar2
mark :11
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
notes (:11)
COMMIT
N inline :3
data <<EOF
$note1c_data
EOF
commit refs/notes/foobar
mark :12
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
notes (:12)
COMMIT
deleteall
N inline :5
data <<EOF
$note2b_data
EOF
INPUT_END
test_expect_success \
'Q: commit notes' \
'git fast-import <input &&
git whatchanged notes-test'
test_expect_success \
'Q: verify pack' \
'for p in .git/objects/pack/*.pack;do git verify-pack $p||exit;done'
commit1=$(git rev-parse notes-test~2)
commit2=$(git rev-parse notes-test^)
commit3=$(git rev-parse notes-test)
cat >expect <<EOF
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
first (:3)
EOF
test_expect_success \
'Q: verify first commit' \
'git cat-file commit notes-test~2 | sed 1d >actual &&
test_cmp expect actual'
cat >expect <<EOF
parent $commit1
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
second (:5)
EOF
test_expect_success \
'Q: verify second commit' \
'git cat-file commit notes-test^ | sed 1d >actual &&
test_cmp expect actual'
cat >expect <<EOF
parent $commit2
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
third (:6)
EOF
test_expect_success \
'Q: verify third commit' \
'git cat-file commit notes-test | sed 1d >actual &&
test_cmp expect actual'
cat >expect <<EOF
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
notes (:9)
EOF
test_expect_success \
'Q: verify first notes commit' \
'git cat-file commit refs/notes/foobar~2 | sed 1d >actual &&
test_cmp expect actual'
cat >expect.unsorted <<EOF
100644 blob $commit1
100644 blob $commit2
100644 blob $commit3
EOF
cat expect.unsorted | sort >expect
test_expect_success \
'Q: verify first notes tree' \
'git cat-file -p refs/notes/foobar~2^{tree} | sed "s/ [0-9a-f]* / /" >actual &&
test_cmp expect actual'
echo "$note1_data" >expect
test_expect_success \
'Q: verify first note for first commit' \
'git cat-file blob refs/notes/foobar~2:$commit1 >actual && test_cmp expect actual'
echo "$note2_data" >expect
test_expect_success \
'Q: verify first note for second commit' \
'git cat-file blob refs/notes/foobar~2:$commit2 >actual && test_cmp expect actual'
echo "$note3_data" >expect
test_expect_success \
'Q: verify first note for third commit' \
'git cat-file blob refs/notes/foobar~2:$commit3 >actual && test_cmp expect actual'
cat >expect <<EOF
parent `git rev-parse --verify refs/notes/foobar~2`
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
notes (:10)
EOF
test_expect_success \
'Q: verify second notes commit' \
'git cat-file commit refs/notes/foobar^ | sed 1d >actual &&
test_cmp expect actual'
cat >expect.unsorted <<EOF
100644 blob $commit1
100644 blob $commit2
100644 blob $commit3
EOF
cat expect.unsorted | sort >expect
test_expect_success \
'Q: verify second notes tree' \
'git cat-file -p refs/notes/foobar^^{tree} | sed "s/ [0-9a-f]* / /" >actual &&
test_cmp expect actual'
echo "$note1b_data" >expect
test_expect_success \
'Q: verify second note for first commit' \
'git cat-file blob refs/notes/foobar^:$commit1 >actual && test_cmp expect actual'
echo "$note2_data" >expect
test_expect_success \
'Q: verify first note for second commit' \
'git cat-file blob refs/notes/foobar^:$commit2 >actual && test_cmp expect actual'
echo "$note3_data" >expect
test_expect_success \
'Q: verify first note for third commit' \
'git cat-file blob refs/notes/foobar^:$commit3 >actual && test_cmp expect actual'
cat >expect <<EOF
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
notes (:11)
EOF
test_expect_success \
'Q: verify third notes commit' \
'git cat-file commit refs/notes/foobar2 | sed 1d >actual &&
test_cmp expect actual'
cat >expect.unsorted <<EOF
100644 blob $commit1
EOF
cat expect.unsorted | sort >expect
test_expect_success \
'Q: verify third notes tree' \
'git cat-file -p refs/notes/foobar2^{tree} | sed "s/ [0-9a-f]* / /" >actual &&
test_cmp expect actual'
echo "$note1c_data" >expect
test_expect_success \
'Q: verify third note for first commit' \
'git cat-file blob refs/notes/foobar2:$commit1 >actual && test_cmp expect actual'
cat >expect <<EOF
parent `git rev-parse --verify refs/notes/foobar^`
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
notes (:12)
EOF
test_expect_success \
'Q: verify fourth notes commit' \
'git cat-file commit refs/notes/foobar | sed 1d >actual &&
test_cmp expect actual'
cat >expect.unsorted <<EOF
100644 blob $commit2
EOF
cat expect.unsorted | sort >expect
test_expect_success \
'Q: verify fourth notes tree' \
'git cat-file -p refs/notes/foobar^{tree} | sed "s/ [0-9a-f]* / /" >actual &&
test_cmp expect actual'
echo "$note2b_data" >expect
test_expect_success \
'Q: verify second note for second commit' \
'git cat-file blob refs/notes/foobar:$commit2 >actual && test_cmp expect actual'
###
### series R (feature and option)
###
cat >input <<EOF
feature no-such-feature-exists
EOF
test_expect_success 'R: abort on unsupported feature' '
test_must_fail git fast-import <input
'
cat >input <<EOF
feature date-format=now
EOF
test_expect_success 'R: supported feature is accepted' '
git fast-import <input
'
cat >input << EOF
blob
data 3
hi
feature date-format=now
EOF
test_expect_success 'R: abort on receiving feature after data command' '
test_must_fail git fast-import <input
'
cat >input << EOF
feature import-marks=git.marks
feature import-marks=git2.marks
EOF
test_expect_success 'R: only one import-marks feature allowed per stream' '
test_must_fail git fast-import <input
'
cat >input << EOF
feature export-marks=git.marks
blob
mark :1
data 3
hi
EOF
test_expect_success \
'R: export-marks feature results in a marks file being created' \
'cat input | git fast-import &&
grep :1 git.marks'
test_expect_success \
'R: export-marks options can be overridden by commandline options' \
'cat input | git fast-import --export-marks=other.marks &&
grep :1 other.marks'
test_expect_success 'R: catch typo in marks file name' '
test_must_fail git fast-import --import-marks=nonexistent.marks </dev/null &&
echo "feature import-marks=nonexistent.marks" |
test_must_fail git fast-import
'
test_expect_success 'R: import and output marks can be the same file' '
rm -f io.marks &&
blob=$(echo hi | git hash-object --stdin) &&
cat >expect <<-EOF &&
:1 $blob
:2 $blob
EOF
git fast-import --export-marks=io.marks <<-\EOF &&
blob
mark :1
data 3
hi
EOF
git fast-import --import-marks=io.marks --export-marks=io.marks <<-\EOF &&
blob
mark :2
data 3
hi
EOF
test_cmp expect io.marks
'
test_expect_success 'R: --import-marks=foo --export-marks=foo to create foo fails' '
rm -f io.marks &&
test_must_fail git fast-import --import-marks=io.marks --export-marks=io.marks <<-\EOF
blob
mark :1
data 3
hi
EOF
'
test_expect_success 'R: --import-marks-if-exists' '
rm -f io.marks &&
blob=$(echo hi | git hash-object --stdin) &&
echo ":1 $blob" >expect &&
git fast-import --import-marks-if-exists=io.marks --export-marks=io.marks <<-\EOF &&
blob
mark :1
data 3
hi
EOF
test_cmp expect io.marks
'
cat >input << EOF
feature import-marks=marks.out
feature export-marks=marks.new
EOF
test_expect_success \
'R: import to output marks works without any content' \
'cat input | git fast-import &&
test_cmp marks.out marks.new'
cat >input <<EOF
feature import-marks=nonexistent.marks
feature export-marks=marks.new
EOF
test_expect_success \
'R: import marks prefers commandline marks file over the stream' \
'cat input | git fast-import --import-marks=marks.out &&
test_cmp marks.out marks.new'
cat >input <<EOF
feature import-marks=nonexistent.marks
feature export-marks=combined.marks
EOF
test_expect_success 'R: multiple --import-marks= should be honoured' '
head -n2 marks.out > one.marks &&
tail -n +3 marks.out > two.marks &&
git fast-import --import-marks=one.marks --import-marks=two.marks <input &&
test_cmp marks.out combined.marks
'
cat >input <<EOF
feature relative-marks
feature import-marks=relative.in
feature export-marks=relative.out
EOF
test_expect_success 'R: feature relative-marks should be honoured' '
mkdir -p .git/info/fast-import/ &&
cp marks.new .git/info/fast-import/relative.in &&
git fast-import <input &&
test_cmp marks.new .git/info/fast-import/relative.out
'
cat >input <<EOF
feature relative-marks
feature import-marks=relative.in
feature no-relative-marks
feature export-marks=non-relative.out
EOF
test_expect_success 'R: feature no-relative-marks should be honoured' '
git fast-import <input &&
test_cmp marks.new non-relative.out
'
test_expect_success 'R: feature ls supported' '
echo "feature ls" |
git fast-import
'
test_expect_success 'R: feature cat-blob supported' '
echo "feature cat-blob" |
git fast-import
'
test_expect_success 'R: cat-blob-fd must be a nonnegative integer' '
test_must_fail git fast-import --cat-blob-fd=-1 </dev/null
'
test_expect_success 'R: print old blob' '
blob=$(echo "yes it can" | git hash-object -w --stdin) &&
cat >expect <<-EOF &&
${blob} blob 11
yes it can
EOF
echo "cat-blob $blob" |
git fast-import --cat-blob-fd=6 6>actual &&
test_cmp expect actual
'
test_expect_success 'R: in-stream cat-blob-fd not respected' '
echo hello >greeting &&
blob=$(git hash-object -w greeting) &&
cat >expect <<-EOF &&
${blob} blob 6
hello
EOF
git fast-import --cat-blob-fd=3 3>actual.3 >actual.1 <<-EOF &&
cat-blob $blob
EOF
test_cmp expect actual.3 &&
test_cmp empty actual.1 &&
git fast-import 3>actual.3 >actual.1 <<-EOF &&
option cat-blob-fd=3
cat-blob $blob
EOF
test_cmp empty actual.3 &&
test_cmp expect actual.1
'
test_expect_success 'R: print new blob' '
blob=$(echo "yep yep yep" | git hash-object --stdin) &&
cat >expect <<-EOF &&
${blob} blob 12
yep yep yep
EOF
git fast-import --cat-blob-fd=6 6>actual <<-\EOF &&
blob
mark :1
data <<BLOB_END
yep yep yep
BLOB_END
cat-blob :1
EOF
test_cmp expect actual
'
test_expect_success 'R: print new blob by sha1' '
blob=$(echo "a new blob named by sha1" | git hash-object --stdin) &&
cat >expect <<-EOF &&
${blob} blob 25
a new blob named by sha1
EOF
git fast-import --cat-blob-fd=6 6>actual <<-EOF &&
blob
data <<BLOB_END
a new blob named by sha1
BLOB_END
cat-blob $blob
EOF
test_cmp expect actual
'
test_expect_success 'setup: big file' '
(
echo "the quick brown fox jumps over the lazy dog" >big &&
for i in 1 2 3
do
cat big big big big >bigger &&
cat bigger bigger bigger bigger >big ||
exit
done
)
'
test_expect_success 'R: print two blobs to stdout' '
blob1=$(git hash-object big) &&
blob1_len=$(wc -c <big) &&
blob2=$(echo hello | git hash-object --stdin) &&
{
echo ${blob1} blob $blob1_len &&
cat big &&
cat <<-EOF
${blob2} blob 6
hello
EOF
} >expect &&
{
cat <<-\END_PART1 &&
blob
mark :1
data <<data_end
END_PART1
cat big &&
cat <<-\EOF
data_end
blob
mark :2
data <<data_end
hello
data_end
cat-blob :1
cat-blob :2
EOF
} |
git fast-import >actual &&
test_cmp expect actual
'
test_expect_success PIPE 'R: copy using cat-file' '
expect_id=$(git hash-object big) &&
expect_len=$(wc -c <big) &&
echo $expect_id blob $expect_len >expect.response &&
rm -f blobs &&
cat >frontend <<-\FRONTEND_END &&
#!/bin/sh
FRONTEND_END
mkfifo blobs &&
(
export GIT_COMMITTER_NAME GIT_COMMITTER_EMAIL GIT_COMMITTER_DATE &&
cat <<-\EOF &&
feature cat-blob
blob
mark :1
data <<BLOB
EOF
cat big &&
cat <<-\EOF &&
BLOB
cat-blob :1
EOF
read blob_id type size <&3 &&
echo "$blob_id $type $size" >response &&
head_c $size >blob <&3 &&
read newline <&3 &&
cat <<-EOF &&
commit refs/heads/copied
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy big file as file3
COMMIT
M 644 inline file3
data <<BLOB
EOF
cat blob &&
echo BLOB
) 3<blobs |
git fast-import --cat-blob-fd=3 3>blobs &&
git show copied:file3 >actual &&
test_cmp expect.response response &&
test_cmp big actual
'
test_expect_success PIPE 'R: print blob mid-commit' '
rm -f blobs &&
echo "A blob from _before_ the commit." >expect &&
mkfifo blobs &&
(
exec 3<blobs &&
cat <<-EOF &&
feature cat-blob
blob
mark :1
data <<BLOB
A blob from _before_ the commit.
BLOB
commit refs/heads/temporary
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
Empty commit
COMMIT
cat-blob :1
EOF
read blob_id type size <&3 &&
head_c $size >actual <&3 &&
read newline <&3 &&
echo
) |
git fast-import --cat-blob-fd=3 3>blobs &&
test_cmp expect actual
'
test_expect_success PIPE 'R: print staged blob within commit' '
rm -f blobs &&
echo "A blob from _within_ the commit." >expect &&
mkfifo blobs &&
(
exec 3<blobs &&
cat <<-EOF &&
feature cat-blob
commit refs/heads/within
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
Empty commit
COMMIT
M 644 inline within
data <<BLOB
A blob from _within_ the commit.
BLOB
EOF
to_get=$(
echo "A blob from _within_ the commit." |
git hash-object --stdin
) &&
echo "cat-blob $to_get" &&
read blob_id type size <&3 &&
head_c $size >actual <&3 &&
read newline <&3 &&
echo deleteall
) |
git fast-import --cat-blob-fd=3 3>blobs &&
test_cmp expect actual
'
cat >input << EOF
option git quiet
blob
data 3
hi
EOF
test_expect_success 'R: quiet option results in no stats being output' '
cat input | git fast-import 2> output &&
test_cmp empty output
'
test_expect_success 'R: feature done means terminating "done" is mandatory' '
echo feature done | test_must_fail git fast-import &&
test_must_fail git fast-import --done </dev/null
'
test_expect_success 'R: terminating "done" with trailing gibberish is ok' '
git fast-import <<-\EOF &&
feature done
done
trailing gibberish
EOF
git fast-import <<-\EOF
done
more trailing gibberish
EOF
'
test_expect_success 'R: terminating "done" within commit' '
cat >expect <<-\EOF &&
OBJID
:000000 100644 OBJID OBJID A hello.c
:000000 100644 OBJID OBJID A hello2.c
EOF
git fast-import <<-EOF &&
commit refs/heads/done-ends
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<EOT
Commit terminated by "done" command
EOT
M 100644 inline hello.c
data <<EOT
Hello, world.
EOT
C hello.c hello2.c
done
EOF
git rev-list done-ends |
git diff-tree -r --stdin --root --always |
sed -e "s/$_x40/OBJID/g" >actual &&
test_cmp expect actual
'
cat >input <<EOF
option git non-existing-option
EOF
test_expect_success 'R: die on unknown option' '
test_must_fail git fast-import <input
'
test_expect_success 'R: unknown commandline options are rejected' '\
test_must_fail git fast-import --non-existing-option < /dev/null
'
test_expect_success 'R: die on invalid option argument' '
echo "option git active-branches=-5" |
test_must_fail git fast-import &&
echo "option git depth=" |
test_must_fail git fast-import &&
test_must_fail git fast-import --depth="5 elephants" </dev/null
'
cat >input <<EOF
option non-existing-vcs non-existing-option
EOF
test_expect_success 'R: ignore non-git options' '
git fast-import <input
'
##
## R: very large blobs
##
blobsize=$((2*1024*1024 + 53))
test-genrandom bar $blobsize >expect
cat >input <<INPUT_END
commit refs/heads/big-file
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
R - big file
COMMIT
M 644 inline big1
data $blobsize
INPUT_END
cat expect >>input
cat >>input <<INPUT_END
M 644 inline big2
data $blobsize
INPUT_END
cat expect >>input
echo >>input
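# Editor's note (sketch, not part of the original test): the exact-byte-count
# "data <n>" form used above lets arbitrary binary payloads follow without any
# delimiter collisions; a minimal standalone stream would look like:
#   printf 'blob\ndata 6\nhello\n' | git fast-import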
test_expect_success \
'R: blob bigger than threshold' \
'test_create_repo R &&
git --git-dir=R/.git fast-import --big-file-threshold=1 <input'
test_expect_success \
'R: verify created pack' \
': >verify &&
for p in R/.git/objects/pack/*.pack;
do
git verify-pack -v $p >>verify || exit;
done'
test_expect_success \
'R: verify written objects' \
'git --git-dir=R/.git cat-file blob big-file:big1 >actual &&
test_cmp expect actual &&
a=$(git --git-dir=R/.git rev-parse big-file:big1) &&
b=$(git --git-dir=R/.git rev-parse big-file:big2) &&
test $a = $b'
test_expect_success \
'R: blob appears only once' \
'n=$(grep $a verify | wc -l) &&
test 1 = $n'
test_done
|
moy/git
|
t/t9300-fast-import.sh
|
Shell
|
gpl-2.0
| 52,299 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2013-2020 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Make sure we can't use files from outside of the variant when parsing lua.
. ./tup.sh
check_no_windows variant
tmkdir build
cat > Tupfile.lua << HERE
tup.include '../foo.lua'
HERE
tup touch Tupfile.lua foo.lua build/tup.config
update_fail_msg "Unable to include file '../foo.lua' since it is outside of the variant tree"
eotup
|
ppannuto/tup
|
test/t8083-lua-include-dotdot.sh
|
Shell
|
gpl-2.0
| 1,081 |
FINEGRAINED_SUPPORTED=no
NAMEEXTRA=
run_bench() {
$SCRIPTDIR/shellpacks/shellpack-bench-vmr-createdelete
return $?
}
|
wjn740/mmtests
|
drivers/driver-vmr-createdelete.sh
|
Shell
|
gpl-2.0
| 120 |
#!/bin/bash -e
#
# Copyright 2013-2019 (c) Yousong Zhou
#
# Install requirements
#
# pip install -r requirements.txt
#
# Example
#
# # github streisand
# ansible-playbook playbooks/streisand.yml -vvv
#
PKG_NAME=ansible
PKG_VERSION=2.8.2
PKG_SOURCE="$PKG_NAME-$PKG_VERSION.tar.gz"
PKG_SOURCE_URL="http://releases.ansible.com/ansible/$PKG_SOURCE"
PKG_SOURCE_MD5SUM=6860a44bf6badad6a4f77091b53b04e3
PKG_PYTHON_VERSIONS=3
. "$PWD/env.sh"
. "$PWD/utils-python-package.sh"
|
yousong/build-scripts
|
build-ansible.sh
|
Shell
|
gpl-2.0
| 468 |
#!/usr/bin/env bash
PROJDIR=$1
[[ -z "$PROJDIR" ]] || [[ ! -d "$PROJDIR" ]] && {
PROJDIR=$(pwd)
echo "Project dir is not specified, using current directory - $PROJDIR" >&2
# exit 1
}
PROJDIR=$(realpath "$PROJDIR")
export PROJDIR
cd "$PROJDIR" || exit 1
ctags -R ./
vim -c ":NERDTree $PROJDIR"
|
flux242/dotfiles
|
.bin/vim_project.sh
|
Shell
|
gpl-2.0
| 302 |
#!/bin/bash
$CMD_DOUBLE_LONGER_SLEEP &
exec $CMD_PERF stat -a -e cpu-clock -I100 -- $CMD_LONGER_SLEEP 2> $LOGS_DIR/endless_wait.log
|
rfmvh/perftool-testsuite
|
base_stat/auxiliary/exec_perf.sh
|
Shell
|
gpl-2.0
| 133 |
#!/bin/bash
NAME=MakeDesktopShortcuts
SCRIPT=makedesktopshortcuts
DESKTOPFILE=makedesktopshortcuts.desktop
if [ "$(id -u)" != "0" ]
then
echo "This script must be run as root" 1>&2
exit 1
else
echo "Press Enter to uninstall $NAME from your system"
read
rm -v "/usr/share/kde4/services/ServiceMenus/$DESKTOPFILE"
rm -v "/usr/bin/$SCRIPT"
echo "Uninstallation complete"
fi
|
Faster3ck/MakeDesktopShortcuts
|
uninstall.sh
|
Shell
|
gpl-3.0
| 405 |
SERVER_IP0=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' dockerredis_master_1)
SERVER_IP1=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' dockerredis_slave_1)
SERVER_IP2=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' dockerredis_slave_2)
SENTINEL_IP1=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' dockerredis_sentinel_1)
SENTINEL_IP2=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' dockerredis_sentinel_2)
SENTINEL_IP3=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' dockerredis_sentinel_3)
echo ------------------------------------------------
echo Redis Master: $SERVER_IP0
echo Redis Slave Replica: $SERVER_IP1
echo Redis Slave Replica: $SERVER_IP2
echo Redis Sentinel_1: $SENTINEL_IP1
echo Redis Sentinel_2: $SENTINEL_IP2
echo Redis Sentinel_3: $SENTINEL_IP3
echo ------------------------------------------------
echo "- Current sentinel status -"; echo ""
docker exec dockerredis_sentinel_1 redis-cli -p 26379 info Sentinel
echo ""; echo "# Current master"
docker exec dockerredis_sentinel_1 redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster
echo ------------------------------------------------
echo "Stopping redis current master (60 seconds)";echo ""
docker pause dockerredis_master_1
sleep 60
echo ------------------------------------------------
echo "- Current sentinel status -"; echo ""
docker exec dockerredis_sentinel_1 redis-cli -p 26379 info Sentinel
echo ""; echo "# Current master"
docker exec dockerredis_sentinel_1 redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster
echo ------------------------------------------------
echo "Restarting redis previous master as a slave & stopping redis current master (60 seconds)";echo ""
docker unpause dockerredis_master_1
docker pause dockerredis_slave_1
sleep 60
echo ------------------------------------------------
echo "- Current sentinel status -"; echo ""
docker exec dockerredis_sentinel_1 redis-cli -p 26379 info Sentinel
echo ""; echo "# Current master"
docker exec dockerredis_sentinel_1 redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster
echo ------------------------------------------------
echo "Restarting redis prevoius master as current slave (60 seconds)"; echo ""
docker unpause dockerredis_slave_1
sleep 60
echo ------------------------------------------------
echo "- Current sentinel status -"; echo ""
docker exec dockerredis_sentinel_1 redis-cli -p 26379 info Sentinel
echo ""; echo "# Current master"
docker exec dockerredis_sentinel_1 redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster
echo ------------------------------------------------
|
gerschinner/redis-cluster-docker-compose
|
test.sh
|
Shell
|
gpl-3.0
| 2,649 |
#@ {
#@ "targets":
#@ [{
#@ "name":"signalflow.svg","dependencies":
#@ [{"ref":"dot","rel":"tool"}
#@ ,{"ref":"xsltproc","rel":"tool"}
#@ ,{"ref":"svgtopdf.sh","rel":"misc"}
#@ ,{"ref":"signalflow.dot","rel":"misc"}
#@ ,{"ref":"dotsvgfilter.xsl","rel":"misc"}]
#@ },{
#@ "name":"signalflow.svg.pdf","dependencies":
#@ [{"ref":"dot","rel":"tool"}
#@ ,{"ref":"xsltproc","rel":"tool"}
#@ ,{"ref":"signalflow.dot","rel":"misc"}
#@ ,{"ref":"svgtopdf.sh","rel":"misc"}
#@ ,{"ref":"dotsvgfilter.xsl","rel":"misc"}]
#@ }]
#@ }
abort()
{
exit -1
}
trap 'abort' 0
set -eo pipefail
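# Editor's note (comment added for clarity, not in the original): trapping
# signal 0 (EXIT) arms abort() for any premature exit caused by "set -e" or a
# failing pipeline; the matching "trap : 0" at the end of the script disarms
# it so a successful run can exit with status 0.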
target_dir="$1"
in_dir="$2"
dot -Tsvg "$in_dir"/signalflow.dot | xsltproc --novalid "$in_dir"/dotsvgfilter.xsl - \
> "$target_dir"/"$in_dir"/"signalflow.svg"
"$in_dir"/svgtopdf.sh "$target_dir"/"$in_dir"/"signalflow.svg" "$target_dir"/"$in_dir"/"signalflow.svg.pdf"
trap : 0
|
milasudril/anja
|
doc/signalflow.sh
|
Shell
|
gpl-3.0
| 880 |
#!/bin/bash
test_description='basic priority functionality
'
. ./test-lib.sh
test_todo_session 'priority usage' <<EOF
>>> todo.sh pri B B
usage: todo.sh pri ITEM# PRIORITY[, ITEM# PRIORITY, ...]
note: PRIORITY must be anywhere from A to Z.
=== 1
EOF
cat > todo.txt <<EOF
smell the uppercase Roses +flowers @outside
notice the sunflowers
stop
EOF
test_todo_session 'basic priority' <<EOF
>>> todo.sh list
2 notice the sunflowers
1 smell the uppercase Roses +flowers @outside
3 stop
--
TODO: 3 of 3 tasks shown
>>> todo.sh pri 1 B
1 (B) smell the uppercase Roses +flowers @outside
TODO: 1 prioritized (B).
>>> todo.sh list
[0;32m1 (B) smell the uppercase Roses +flowers @outside[0m
2 notice the sunflowers
3 stop
--
TODO: 3 of 3 tasks shown
>>> todo.sh -p list
1 (B) smell the uppercase Roses +flowers @outside
2 notice the sunflowers
3 stop
--
TODO: 3 of 3 tasks shown
>>> todo.sh pri 2 C
2 (C) notice the sunflowers
TODO: 2 prioritized (C).
>>> todo.sh -p list
1 (B) smell the uppercase Roses +flowers @outside
2 (C) notice the sunflowers
3 stop
--
TODO: 3 of 3 tasks shown
>>> todo.sh add "smell the coffee +wakeup"
4 smell the coffee +wakeup
TODO: 4 added.
>>> todo.sh -p list
1 (B) smell the uppercase Roses +flowers @outside
2 (C) notice the sunflowers
4 smell the coffee +wakeup
3 stop
--
TODO: 4 of 4 tasks shown
EOF
test_todo_session 'priority error' <<EOF
>>> todo.sh pri 10 B
=== 1
TODO: No task 10.
EOF
cat > todo.txt <<EOF
(B) smell the uppercase Roses +flowers @outside
(C) notice the sunflowers
stop
EOF
test_todo_session 'reprioritize' <<EOF
>>> todo.sh pri 2 A
2 (A) notice the sunflowers
TODO: 2 re-prioritized from (C) to (A).
>>> todo.sh -p list
2 (A) notice the sunflowers
1 (B) smell the uppercase Roses +flowers @outside
3 stop
--
TODO: 3 of 3 tasks shown
>>> todo.sh pri 2 a
2 (A) notice the sunflowers
TODO: 2 already prioritized (A).
>>> todo.sh -p list
2 (A) notice the sunflowers
1 (B) smell the uppercase Roses +flowers @outside
3 stop
--
TODO: 3 of 3 tasks shown
EOF
cat > todo.txt <<EOF
smell the uppercase Roses +flowers @outside
notice the sunflowers
stop
EOF
test_todo_session 'multiple priority' <<EOF
>>> todo.sh pri 1 A 2 B
1 (A) smell the uppercase Roses +flowers @outside
TODO: 1 prioritized (A).
2 (B) notice the sunflowers
TODO: 2 prioritized (B).
EOF
test_todo_session 'multiple reprioritize' <<EOF
>>> todo.sh pri 1 Z 2 X
1 (Z) smell the uppercase Roses +flowers @outside
TODO: 1 re-prioritized from (A) to (Z).
2 (X) notice the sunflowers
TODO: 2 re-prioritized from (B) to (X).
EOF
test_todo_session 'multiple prioritize error' <<EOF
>>> todo.sh pri 1 B 4 B
=== 1
1 (B) smell the uppercase Roses +flowers @outside
TODO: 1 re-prioritized from (Z) to (B).
TODO: No task 4.
>>> todo.sh pri 1 C 4 B 3 A
=== 1
1 (C) smell the uppercase Roses +flowers @outside
TODO: 1 re-prioritized from (B) to (C).
TODO: No task 4.
EOF
test_done
|
todotxt/todo.txt-cli
|
tests/t1200-pri.sh
|
Shell
|
gpl-3.0
| 2,890 |
#!/bin/bash
#SBATCH -p general
#SBATCH -n 1
#SBATCH -N 1
#SBATCH --mem 8000
#SBATCH -t 1-00:00:00
#SBATCH -o faidx_%j.out
#SBATCH -e faidx_%j.err
#SBATCH --constrain=holyib
module load samtools
samtools faidx final.assembly.homo.fa
|
ajshultz/whole-genome-reseq
|
make_faidx_hofi.sh
|
Shell
|
gpl-3.0
| 235 |
#!/usr/bin/env bash
# GeoNode installer script
#
# using getopts
#
if [[ $EUID -ne 0 ]]; then
echo "This script must be run as root" 1>&2
exit 1
fi
while getopts 's:' OPTION
do
case $OPTION in
s) stepflag=1
stepval="$OPTARG"
;;
?) printf "Usage: %s: [-s value] configfile\n" $(basename $0) >&2
exit 2
;;
esac
done
shift $(($OPTIND - 1))
function preinstall() {
# Places the geonode files in the right location in the file system
#
# First step is to unpack the wars in the tomcat webapps dir
#
mkdir -p $TOMCAT_WEBAPPS/geoserver
mkdir -p $TOMCAT_WEBAPPS/geonetwork
unzip $INSTALL_DIR/geoserver.war -d $TOMCAT_WEBAPPS/geoserver
unzip $INSTALL_DIR/geonetwork.war -d $TOMCAT_WEBAPPS/geonetwork
# GeoServer data is better placed outside tomcat to survive reinstalls
mkdir -p $GEOSERVER_DATA_DIR
cp -rp $TOMCAT_WEBAPPS/geoserver/data/* $GEOSERVER_DATA_DIR
#
# Second step is to put the apache wsgi and conf files in the right places
#
mkdir -p $GEONODE_WWW/static
mkdir -p $GEONODE_WWW/uploaded
# The wsgi directory is where the Python / Django application is configured
mkdir -p $GEONODE_WWW/wsgi
cp -rp $INSTALL_DIR/support/geonode.wsgi $GEONODE_WWW/wsgi/
# The robots.txt file tells google and other crawlers not to harvest /geoserver
# or /geonetwork, asking for all the layers at the same time is too taxing.
cp -rp $INSTALL_DIR/support/geonode.robots $GEONODE_WWW/robots.txt
# The apache configuration has a placeholder for the final location of the
# geonode virtualenv, it should be the site-packages directory of the venv.
mkdir -p $APACHE_SITES
cp -rp $INSTALL_DIR/support/geonode.apache $APACHE_SITES/geonode
#
# Third step is to unpack the pybundle and put the virtualenv in the right place
#
mkdir -p $GEONODE_LIB
cp -rp $INSTALL_DIR/geonode-webapp.pybundle $GEONODE_LIB
# Fourth step is to install the binary
mkdir -p $GEONODE_BIN
cp -rp $INSTALL_DIR/support/geonode.binary $GEONODE_BIN/geonode
# Fifth step is to copy the scripts and patches that would be used in postinst
mkdir -p $GEONODE_ETC
mkdir -p $GEONODE_ETC/geonetwork
mkdir -p $GEONODE_ETC/geoserver
# A copy of web.xml and config.xml are put with the config files
# they will be patched during the post-install and need to survive upgrades.
cp -rp $TOMCAT_WEBAPPS/geoserver/WEB-INF/web.xml $GEONODE_ETC/geoserver/
cp -rp $TOMCAT_WEBAPPS/geonetwork/WEB-INF/config.xml $GEONODE_ETC/geonetwork/
mkdir -p $GEONODE_SHARE
# This Django fixture contains a superuser called geonode that does not have
# a working password. It is used as the default superuser.
cp -rp $INSTALL_DIR/support/geonode.admin $GEONODE_SHARE/admin.json
cp -rp $INSTALL_DIR/support/geoserver.patch $GEONODE_SHARE
cp -rp $INSTALL_DIR/support/geonetwork.patch $GEONODE_SHARE
#
# Sixth step is to configure /etc/geonode/ with folders for custom media and templates
#
cp -rp $INSTALL_DIR/support/geonode.local_settings $GEONODE_ETC/local_settings.py
# Extra media put in the following directory will be collected in /var/www/geonode/static
# when 'geonode collectstatic -v0' is run.
mkdir -p $GEONODE_ETC/media
# The recommended way to change a template is to copy it from the original location into
# this directory, this one has precedence over all the other template locations.
mkdir -p $GEONODE_ETC/templates
}
function randpass() {
[ "$2" == "0" ] && CHAR="[:alnum:]" || CHAR="[:graph:]"
cat /dev/urandom | tr -cd "$CHAR" | head -c ${1:-32}
echo
}
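# Editor's note: a hypothetical usage sketch, not from the original installer.
# The first argument is the length (default 32); the second selects the
# character class: "0" restricts output to [:alnum:], anything else allows all
# printable [:graph:] characters.
#   dbpass=$(randpass 8 0)     # 8 alphanumeric characters
#   secret=$(randpass 16 1)    # 16 characters from the full printable set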
function configuretomcat() {
# configure tomcat defaults to avoid geonetwork bug and increase the available ram
if grep saxon /etc/default/tomcat6
then
echo
else
cat <<- EOF >> /etc/default/tomcat6
JAVA_OPTS='-Djava.awt.headless=true -Xmx1024m -Xms1024M -XX:MaxPermSize=256m -XX:CompileCommand=exclude,net/sf/saxon/event/ReceivingContentHandler.startElement'
EOF
fi
# Patch geoserver and geonetwork config files
patch $GEONODE_ETC/geonetwork/config.xml $GEONODE_SHARE/geonetwork.patch
patch $GEONODE_ETC/geoserver/web.xml $GEONODE_SHARE/geoserver.patch
# Let geonetwork know the geonode password
sed -i "s/GEONODE_DATABASE_PASSWORD/$psqlpass/g" $GEONODE_ETC/geonetwork/config.xml
rm -rf $TOMCAT_WEBAPPS/geonetwork/WEB-INF/config.xml
ln -sf $GEONODE_ETC/geonetwork/config.xml $TOMCAT_WEBAPPS/geonetwork/WEB-INF/config.xml
rm -rf $TOMCAT_WEBAPPS/geoserver/WEB-INF/web.xml
cp -rp $GEONODE_ETC/geoserver/web.xml $TOMCAT_WEBAPPS/geoserver/WEB-INF/web.xml
# Set up logging symlinks to /var/log/geonode
mkdir -p $GEONODE_LOG
ln -sf /var/log/tomcat6/catalina.out $GEONODE_LOG/tomcat.log
ln -sf /var/log/tomcat6/geonetwork.log $GEONODE_LOG/geonetwork.log
ln -sf $GEOSERVER_DATA_DIR/logs/geoserver.log $GEONODE_LOG/geoserver.log
# Set the tomcat user as the owner
chown tomcat6. $GEOSERVER_DATA_DIR -R
chown tomcat6. $TOMCAT_WEBAPPS/geonetwork -R
chown tomcat6. $TOMCAT_WEBAPPS/geoserver -R
$TOMCAT_SERVICE restart
}
function configurepostgres() {
# configure postgres user and database
#
psqlpass=$(randpass 8 0)
if su - postgres -c 'psql -l | grep -q geonode'
then
echo
else
su - postgres -c "createdb -E UTF8 geonode"
su - postgres -c "createlang -d geonode plpgsql"
su - postgres -c "psql -d geonode -f $POSTGIS_SQL_PATH/$POSTGIS_SQL"
su - postgres -c "psql -d geonode -f $POSTGIS_SQL_PATH/spatial_ref_sys.sql"
su - postgres -c "psql -d geonode -c 'GRANT ALL ON geometry_columns TO PUBLIC;'"
su - postgres -c "psql -d geonode -c 'GRANT ALL ON spatial_ref_sys TO PUBLIC;'"
if ((GEOGRAPHY))
then
su - postgres -c "psql -d geonode -c 'GRANT ALL ON geography_columns TO PUBLIC;'"
fi
echo "CREATE ROLE geonode with login password '$psqlpass' SUPERUSER INHERIT;" >> $GEONODE_SHARE/role.sql
su - postgres -c "psql < $GEONODE_SHARE/role.sql"
fi
}
function configuredjango() {
# set up django
#
cd $GEONODE_LIB
# Install the latest version of pip and virtualenv from PyPi to avoid having
# problems in Ubuntu 10.04
# FIXME: It is less than ideal that this command accesses the network. Ideas?
easy_install -U virtualenv
easy_install -U pip
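# Editor's note (a sketch only, not the project's documented method): the
# network access could be avoided by pre-seeding a local directory with the
# needed archives and installing offline, e.g.:
#   pip install --no-index --find-links=/var/cache/geonode-wheels pip virtualenv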
virtualenv .
# Verify if the virtualenv has been created and abort if bin/activate does not exist
if [ ! -f bin/activate ]
then
echo "Creation of virtualenv failed, aborting installation"
exit -1
fi
source bin/activate
touch geoserver_token
pip install geonode-webapp.pybundle
if [ -d src/GeoNodePy/geonode/media/static ]
then
mv -f src/GeoNodePy/geonode/media/static src/GeoNodePy/geonode/media/geonode
fi
if grep THE_SECRET_KEY $GEONODE_ETC/local_settings.py
then
secretkey=$(randpass 18 0)
geoserverpass=$(randpass 8 0)
sed -i "s/THE_SECRET_KEY/$secretkey/g" $GEONODE_ETC/local_settings.py
sed -i "s/THE_GEOSERVER_PASSWORD/$geoserverpass/g" $GEONODE_ETC/local_settings.py
sed -i "s/THE_DATABASE_PASSWORD/$psqlpass/g" $GEONODE_ETC/local_settings.py
fi
ln -sf $GEONODE_ETC/local_settings.py $GEONODE_LIB/src/GeoNodePy/geonode/local_settings.py
# Set up logging symlink
ln -sf /var/log/apache2/error.log $GEONODE_LOG/apache.log
chmod +x $GEONODE_BIN/geonode
geonode syncdb --noinput
geonode collectstatic -v0 --noinput
geonode loaddata /usr/share/geonode/admin.json
}
function configureapache() {
# Setup apache
#
chown www-data -R $GEONODE_WWW
a2dissite default
a2enmod proxy_http
sitedir=`$GEONODE_LIB/bin/python -c "from distutils.sysconfig import get_python_lib; print get_python_lib()"`
sed -i '1d' $APACHE_SITES/geonode
sed -i "1i WSGIDaemonProcess geonode user=www-data threads=15 processes=2 python-path=$sitedir" $APACHE_SITES/geonode
a2ensite geonode
$APACHE_SERVICE restart
}
function postinstall {
configurepostgres
configuretomcat
configuredjango
configureapache
}
if [ $# -eq 1 ]
then
printf "Sourcing %s as the configuration file\n" $1
source $1
else
printf "Usage: %s: [-s value] configfile\n" $(basename $0) >&2
exit 2
fi
if [ "$stepflag" ]
then
printf "\tStep: '$stepval specified\n"
else
stepval="all"
echo "heh"
fi
case $stepval in
pre)
echo "Running GeoNode preinstall ..."
preinstall
;;
post)
echo "Running GeoNode postinstall ..."
postinstall
;;
all)
echo "Running GeoNode installation ..."
preinstall
postinstall
;;
*)
printf "\tValid values for step parameter are: 'pre', 'post','all'\n"
printf "\tDefault value for step is 'all'\n"
;;
esac
|
makinacorpus/geonode
|
shared/package/install.sh
|
Shell
|
gpl-3.0
| 8,600 |
#!/bin/sh
if ! pgrep --help 2>/dev/null >/dev/null; then
echo DONE 1
echo "pgrep is required" >&2
exit 1
fi
BASE_DIR="$(dirname "$(readlink -f "$0")")"
# load the data required for generating the callback
. "$BASE_DIR/env.sh"
URL_PREFIX="$CALLBACK_HOST/dynflow/tasks/$TASK_ID"
AUTH="$TASK_ID:$OTP"
CURL="curl --silent --show-error --fail --max-time 10"
MY_LOCK_FILE="$BASE_DIR/retrieve_lock.$$"
MY_PID=$$
echo $MY_PID >"$MY_LOCK_FILE"
LOCK_FILE="$BASE_DIR/retrieve_lock"
TMP_OUTPUT_FILE="$BASE_DIR/tmp_output"
RUN_TIMEOUT=30 # for how long can the script hold the lock
WAIT_TIMEOUT=60 # for how long the script is trying to acquire the lock
START_TIME=$(date +%s)
fail() {
echo RUNNING
echo "$1"
exit 1
}
acquire_lock() {
# try to acquire lock by creating the file (ln should be atomic and fail in case
# another process succeeded first). We also check the content of the lock file,
# in case our process won when competing over the lock while invalidating
# the lock on timeout.
ln "$MY_LOCK_FILE" "$LOCK_FILE" 2>/dev/null || [ "$(head -n1 "$LOCK_FILE")" = "$MY_PID" ]
return $?
}
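# Editor's note: the locking idiom above in isolation (a sketch, not part of
# this script). "ln" refuses to overwrite an existing target, which yields an
# atomic test-and-create on POSIX filesystems:
#   echo $$ >"/tmp/lock.$$"                      # candidate file holding our pid
#   if ln "/tmp/lock.$$" /tmp/app.lock 2>/dev/null; then
#       echo "lock acquired by $$"
#   fi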
# acquiring the lock before proceeding, to ensure only one instance of the script is running
while ! acquire_lock; do
# we failed to create retrieve_lock - assuming there is already another retrieve script running
current_pid=$(head -n1 "$LOCK_FILE")
if [ -z "$current_pid" ]; then
continue
fi
# check whether the lock is not too old (compared to $RUN_TIMEOUT) and try to kill
# the holder if it is, so that we don't have a stalled process here
lock_lines_count=$(wc -l < "$LOCK_FILE")
current_lock_time=$(stat --format "%Y" "$LOCK_FILE")
current_time=$(date +%s)
if [ "$(( current_time - START_TIME ))" -gt "$WAIT_TIMEOUT" ]; then
# We were waiting for the lock for too long - just give up
fail "Wait time exceeded $WAIT_TIMEOUT"
elif [ "$(( current_time - current_lock_time ))" -gt "$RUN_TIMEOUT" ]; then
# The previous lock was held for too long - start the re-acquiring procedure
if [ "$lock_lines_count" -gt 1 ]; then
# there were multiple processes waiting for lock without resolution
# longer than the $RUN_TIMEOUT - we reset the lock file and let processes
# to compete
echo "RETRY" > "$LOCK_FILE"
fi
if [ "$current_pid" != "RETRY" ]; then
# try to kill the currently stalled process
kill -9 "$current_pid" 2>/dev/null
fi
# try to add our process as one candidate
echo $MY_PID >> "$LOCK_FILE"
if [ "$( head -n2 "$LOCK_FILE" | tail -n1 )" = "$MY_PID" ]; then
# our process won the competition for the new lock: it is the first pid
# after the original one in the lock file - take ownership of the lock
# next iteration only this process will get through
echo $MY_PID >"$LOCK_FILE"
fi
else
# still waiting for the original owner to finish
sleep 1
fi
done
release_lock() {
rm "$MY_LOCK_FILE"
rm "$LOCK_FILE"
}
# ensure the release the lock at exit
trap "release_lock" EXIT
# make sure we clear previous tmp output file
if [ -e "$TMP_OUTPUT_FILE" ]; then
rm "$TMP_OUTPUT_FILE"
fi
pid=$(cat "$BASE_DIR/pid")
[ -f "$BASE_DIR/position" ] || echo 1 > "$BASE_DIR/position"
position=$(cat "$BASE_DIR/position")
prepare_output() {
if [ -e "$BASE_DIR/manual_mode" ] || ([ -n "$pid" ] && pgrep -P "$pid" >/dev/null 2>&1); then
echo RUNNING
else
echo "DONE $(cat "$BASE_DIR/exit_code" 2>/dev/null)"
fi
[ -f "$BASE_DIR/output" ] || exit 0
tail --bytes "+${position}" "$BASE_DIR/output" > "$TMP_OUTPUT_FILE"
cat "$TMP_OUTPUT_FILE"
}
# prepare the callback payload
payload() {
if [ -n "$1" ]; then
exit_code="$1"
else
exit_code=null
fi
if [ -e "$BASE_DIR/manual_mode" ]; then
manual_mode=true
output=$(prepare_output | base64 -w0)
else
manual_mode=false
fi
echo "{ \"exit_code\": $exit_code,"\
" \"step_id\": \"$STEP_ID\","\
" \"manual_mode\": $manual_mode,"\
" \"output\": \"$output\" }"
}
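# Editor's note (alternative sketch, not the project's code): if jq were
# available on the target host, the same payload could be assembled without
# manual escaping, e.g.:
#   jq -n --argjson ec "${exit_code:-null}" --arg sid "$STEP_ID" \
#      '{exit_code: $ec, step_id: $sid}'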
if [ "$1" = "push_update" ]; then
if [ -e "$BASE_DIR/exit_code" ]; then
exit_code="$(cat "$BASE_DIR/exit_code")"
action="done"
else
exit_code=""
action="update"
fi
$CURL -X POST -d "$(payload $exit_code)" -u "$AUTH" "$URL_PREFIX"/$action 2>>"$BASE_DIR/curl_stderr"
success=$?
else
prepare_output
success=$?
fi
if [ "$success" = 0 ] && [ -e "$TMP_OUTPUT_FILE" ]; then
# in case the retrieval was successful, move the position of the cursor to be read next time
bytes=$(wc --bytes < "$TMP_OUTPUT_FILE")
expr "${position}" + "${bytes}" > "$BASE_DIR/position"
fi
|
theforeman/smart_proxy_remote_execution_ssh
|
lib/smart_proxy_remote_execution_ssh/async_scripts/retrieve.sh
|
Shell
|
gpl-3.0
| 4,870 |
cleanup_loopback_image_file() {
losetup -d "$BLOCK_DEVICE"
}
register_cleanup "cleanup_loopback_image_file"
BLOCK_DEVICE="$(losetup --show -f "$IMAGEFILE")"
if ! [ -b "$BLOCK_DEVICE" ]; then
fatal "Block device '$BLOCK_DEVICE' is not available... WTF?"
fi
|
ucib/ucib
|
plugins/misc/raw-image-file/tasks/03-loopback-image-file.sh
|
Shell
|
gpl-3.0
| 261 |
#@ {
#@ "targets":
#@ [{
#@ "name":"version.xml","dependencies":
#@ [
#@ {"ref":"../versioninfo.txt","rel":"misc"}
#@ ,{"ref":"cat","rel":"tool"}
#@ ]
#@ }]
#@ }
abort()
{
exit -1
}
trap 'abort' 0
set -eo pipefail
dir_target="$1"
in_dir="$2"
echo -e '<?xml version="1.0"?>
<content><subtitle>version ' > "$dir_target/$in_dir"/version.xml
cat "$dir_target/$in_dir"/../versioninfo.txt >> "$dir_target/$in_dir"/version.xml
echo '</subtitle></content>' >> "$dir_target/$in_dir"/version.xml
trap : 0
|
milasudril/anja
|
doc/version.sh
|
Shell
|
gpl-3.0
| 514 |
#Creates docker image and tag
sudo docker build --force-rm=true --no-cache=true --shm-size=1G -t fnietoga/oracledatabase:11.2.0.2-xe -f dockerfile.xe .
#sudo docker push fnietoga/oracledatabase:11.2.0.2-xe
#Run docker with custom scripts
#sudo docker events
sudo docker run --name oracle -p 1521:1521 -p 8089:8080 --shm-size=1G -t -d fnietoga/oracledatabase:11.2.0.2-xe
#Container logs
sudo docker logs --details oracle
#Interactive bash to docker container
#sudo docker exec -i -t oracle /bin/bash
sudo docker container stop oracle
sudo docker container rm oracle
## Attach to an intermediate image when a build step fails
docker run --rm -it 87b3a94b9960 /bin/bash
|
fnietoga/dockerWcfOra
|
containers/oracle-11.2.0.2-xe/dockerCommands.sh
|
Shell
|
gpl-3.0
| 658 |
#!/bin/sh
KEYFILE="/tmp/fatrat-webui.key"
CONFIGFILE="/tmp/fatrat-webui.cnf"
CSRFILE="/tmp/fatrat-webui.csr"
CRTFILE="/tmp/fatrat-webui.crt"
PEMFILE="/tmp/fatrat-webui.pem"
if ! which sed >/dev/null; then
echo "No sed installed"
exit 1
fi
if ! which openssl >/dev/null; then
echo "No openssl installed"
exit 1
fi
sed "s/%HOSTNAME%/$2/g" < "$1" > "$CONFIGFILE"
openssl genrsa -out "$KEYFILE" 4096
openssl req -new -key "$KEYFILE" -config "$CONFIGFILE" -out "$CSRFILE"
openssl x509 -req -days 3650 -in "$CSRFILE" -signkey "$KEYFILE" -out "$CRTFILE"
cat "$KEYFILE" "$CRTFILE" > "$PEMFILE"
rm -f -- "$KEYFILE" "$CONFIGFILE" "$CSRFILE"
if [ ! -f "$CRTFILE" ]; then
echo "Failed to generate a certificate"
exit 1
fi
rm -f -- "$CRTFILE"
exit 0
|
LubosD/fatrat
|
data/genssl.sh
|
Shell
|
gpl-3.0
| 758 |
#!/bin/bash
# simple script to download and install the
# google logging library and ceres-solver library
wget http://google-glog.googlecode.com/files/glog-0.3.3.tar.gz
tar xvf glog-0.3.3.tar.gz
cd glog-0.3.3
./configure
make -j2
sudo make install
cd ..
wget http://ceres-solver.googlecode.com/files/ceres-solver-1.7.0.tar.gz
tar xvf ceres-solver-1.7.0.tar.gz
sudo apt-get install libatlas-base-dev libeigen3-dev libsuitesparse-dev
mkdir ceres-bin
cd ceres-bin
cmake ../ceres-solver-1.7.0 -DGFLAGS=OFF
make -j2
make test
sudo make install
|
Breakthru/sequential_nrsfm
|
sequential/build_ceres.sh
|
Shell
|
gpl-3.0
| 539 |
if [[ -z "$1" ]] || [[ -z "$2" ]]; then
echo "Usage: $0 <vm_name> <image_url>"
exit 1
fi
# Some installers:
# debian8 (jessie): http://ftp.nl.debian.org/debian/dists/jessie/main/installer-amd64/
# ubuntu 16.04 (xenial): http://archive.ubuntu.com/ubuntu/dists/xenial/main/installer-amd64/
vm_name=$1
installer_url=$2
disk_location=$HOME/vm/$vm_name.qcow2
qemu-img create -f qcow2 "$disk_location" 20G
virt-install \
--name "$vm_name" \
--ram 2048 \
--disk path=$disk_location \
--vcpus 1 \
--os-type linux \
--os-variant generic \
--network bridge=virbr0 \
--graphics none \
--console pty,target_type=serial \
--location "$installer_url" \
--extra-args 'console=ttyS0'
# Run `systemctl enable getty@ttyS0` inside the guest to make the console permanent
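# Editor's note: hypothetical follow-up commands, not part of the original
# script. Once the installer finishes, the guest is typically managed with:
#   virsh start "$vm_name"      # boot the installed guest
#   virsh console "$vm_name"    # attach to the serial console (detach with Ctrl+])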
|
arthurljones/dotfiles
|
scripts/make_vm.sh
|
Shell
|
gpl-3.0
| 788 |
#!/bin/bash
set -e -u -o pipefail || exit 1
. "$( dirname "$( readlink -e "${0}" )" )/server-common.sh"
test "${#}" -eq 0 || { echo 'rabbitmq-bql: wrong arguments!' >&2 ; exit 1 ; }
test -e "${ESCP_RABBITMQ_SERVER_BIN}_bql/bql" || { echo 'rabbitmq-bql: `bql` not found!' >&2 ; exit 1 ; }
exec "${ESCP_RABBITMQ_SERVER_BIN}_bql/bql" -host "${RABBITMQ_NODE_IP_ADDRESS}" -port "${RABBITMQ_NODE_PORT}"
exit 1
|
cipriancraciun/extremely-simple-cluster-platform
|
components/rabbitmq-server/scripts/bql-shell.sh
|
Shell
|
gpl-3.0
| 409 |
#!/bin/bash
#######################################
# ### Raúl Caro Pastorino ### #
## ## ## ##
### # https://github.com/fryntiz/ # ###
## ## ## ##
# ### www.fryntiz.es ### #
#######################################
# Write a script that accepts an indeterminate number of parameters. If a
# parameter is a file larger than 50 bytes, it will be deleted. If it is a
# directory, confirmation will be requested before deleting it. In any other
# case an error message is shown: "argument not processed: <argument>".
# Only the working directory will be processed.
TMP=""
#clear
if [ $# -lt 1 ]; then
echo "Se necesita al menos 1 parámetro"
exit 1
fi
for i in $*; do
TMP=`du -b $i`
TMP=`echo $TMP | tr -s " " | cut -d " " -f 1`
if [ -f $i ] && [ `echo $TMP` -gt 50 ]; then
echo "$i es un archivo con un tamaño de $TMP bytes"
rm $i
elif [ -d $i ]; then
echo "$i es un directorio, desea borrarlo [s/n] no por omisión"
read input
case $input in
s|S|y|Y) rm -R $i;;
*) echo "No se ha borrado $i";;
esac
else
echo "Error, no se procesa el argumento: $i"
fi
done
exit 0
|
fryntiz/ciclosuperior
|
Scripts_Bash/Nivel medio/17_filtrar.sh
|
Shell
|
gpl-3.0
| 1,201 |
#!/bin/bash
WORKDIR=nsmb.d
DOL=${WORKDIR}/sys/main.dol
DOWNLOAD_LINK="https://www.dropbox.com/s/f7x8evfrc07bcbw/NSMBW%203%20The%20Final%20Levels.zip"
RIIVOLUTION_ZIP="NSMBW3_The final levels.zip"
RIIVOLUTION_DIR="NSMBW3"
GAMENAME="NSMBW3: The Final Levels"
XML_SOURCE="${RIIVOLUTION_DIR}"
XML_FILE="${RIIVOLUTION_DIR}"/../riivolution/NSMBW3.XML
GAME_TYPE=RIIVOLUTION
BANNER_LOCATION=${WORKDIR}/files/opening.bnr
WBFS_MASK="SMN[PEJ]01"
show_notes () {
echo -e \
"************************************************
${GAMENAME}
NSMBW Hack featuring a bunch of new levels.
Source: http://www.rvlution.net/forums/viewtopic.php?f=53&t=1673
Base Image: New Super Mario Bros. Wii (SMN?01)
Supported Versions: EURv1, EURv2, USAv1, USAv2, JPNv1
************************************************"
}
detect_game_version () {
nsmbw_version
GAMEID=SFL${REG_LETTER}01
if [[ ${VERSION} != EURv* ]]; then
echo -e "Versions other than PAL won't show the correct title-screen."
fi
}
place_files () {
NEW_DIRS=( "${WORKDIR}"/files/EU/NedEU/{Message,Layout} )
for dir in "${NEW_DIRS[@]}"; do
mkdir -p "${dir}"
done
case ${VERSION} in
EUR* )
LANGDIRS=( EngEU FraEU GerEU ItaEU SpaEU NedEU )
for dir in "${LANGDIRS[@]}"; do
cp -r "${RIIVOLUTION_DIR}"/EU/EngEU/Message "${WORKDIR}"/files/EU/"${dir}"/
done
cp "${RIIVOLUTION_DIR}"/EU/Layout/openingtitle/* "${WORKDIR}"/files/EU/Layout/openingTitle/openingTitle.arc
;;
USAv* )
LANGDIRS=( FraUS EngUS SpaUS )
for dir in "${LANGDIRS[@]}"; do
cp -r "${RIIVOLUTION_DIR}"/EU/EngEU/Message "${WORKDIR}"/files/US/"${dir}"/
done
;;
JPNv1 )
cp -r "${RIIVOLUTION_DIR}"/EU/EngEU/Message "${WORKDIR}"/files/JP/
;;
esac
cp -r "${RIIVOLUTION_DIR}"/Stage/ "${WORKDIR}"/files/
}
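# Editor's note (explanatory comment, not in the original script): the hex
# pairs patched below are ASCII strings -- 53756D6D53756E is "SummSun" and
# 7769696D6A3264 is "wiimj2d" -- i.e. the patch swaps a resource name at
# three addresses in main.dol.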
dolpatch () {
${WIT} dolpatch ${DOL} \
"802F148C=53756D6D53756E#7769696D6A3264" \
"802F118C=53756D6D53756E#7769696D6A3264" \
"802F0F8C=53756D6D53756E#7769696D6A3264" \
xml="${PATCHIMAGE_PATCH_DIR}/NSMBW_AP.xml" -q
}
|
Nanolx/patchimage
|
scripts/nsmbw/nsmbw3.sh
|
Shell
|
gpl-3.0
| 1,992 |
#!/bin/bash
# Original source: https://github.com/gauge/gauge/blob/master/script/mkdeb
# Usage:
# ./build/mkdeb.sh [--rebuild]
set -e
function err () {
echo "ERROR: $1"
exit 1
}
ROOT=`pwd -P`
DEPLOY_DIR="$ROOT/deploy"
BUILD_DIR="$ROOT/build"
OS=`uname -s | tr '[:upper:]' '[:lower:]'`
ARCH="i386"
NAME="gauge"
FILE_EXT="zip"
FILE_MODE=755
CONTROL_FILE="$BUILD_DIR/packaging/deb/control"
POSTINST_FILE="$BUILD_DIR/packaging/deb/postinst"
GAUGE_SETUP_FILE="$BUILD_DIR/packaging/gauge_setup"
if [ "$OS" != "linux" ]; then
err "This script can only be run on Linux systems"
fi
if [ "$1" == "--rebuild" ]; then
REBUILD_NEEDED=1
fi
function rebuild () {
rm -rf "$DEPLOY_DIR"
go run build/make.go --all-platforms --target-linux
go run build/make.go --distro --all-platforms --target-linux
}
function check_and_rebuild() {
if [ ! -d "$DEPLOY_DIR" ]; then
echo -e "Building distro packages...\n"
rebuild
elif [ ! -z "$REBUILD_NEEDED" ]; then
echo -e "Rebuild flag set. Rebuilding distro packages...\n"
rebuild
else
echo -e "Reusing existing distro package. Use '--rebuild' to trigger a package rebuild...\n"
fi
}
function set_arch() {
if [ -z "$1" ]; then
ARCHTYPE=$(ls $NAME*.$FILE_EXT | head -1 | rev | cut -d '-' -f 1 | rev | cut -d '.' -f 2)
else
ARCHTYPE=$(echo $1 | sed "s/^[a-z]*\///" | rev | cut -d '-' -f 1 | rev | cut -d '.' -f 2)
fi
if [ "$ARCHTYPE" == "x86_64" ]; then
ARCH="amd64"
else
ARCH="i386"
fi
}
function set_version() {
if [ -z "$1" ]; then
VERSION=$(ls $NAME*$ARCHTYPE.$FILE_EXT | head -1 | sed "s/\.[^\.]*$//" | sed "s/$NAME-//" | sed "s/-[a-z]*\.[a-z0-9_]*$//")
else
VERSION=$(echo `basename $1` | sed "s/^[a-z]*\///" | sed "s/\.[^\.]*$//" | sed "s/$NAME-//" | sed "s/-[a-z]*\.[a-z0-9_]*$//")
fi
}
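# Editor's note: a worked example of the parsing above with a hypothetical
# artifact name, not taken from the original script. For
# "gauge-0.3.2-linux.x86_64.zip":
#   set_arch    -> ARCHTYPE=x86_64, ARCH=amd64
#   set_version -> VERSION=0.3.2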
function set_pkg_info() {
PKG="$DEPLOY_DIR/$NAME-$VERSION-$OS.$ARCHTYPE.$FILE_EXT"
PKG_SRC="$DEPLOY_DIR/$NAME-$VERSION-pkg"
}
function set_info() {
set_arch "$1"
set_version "$1"
set_pkg_info
}
function clean_stage() {
TARGET_ROOT="$DEPLOY_DIR/deb"
rm -rf "$TARGET_ROOT"
mkdir -p "$TARGET_ROOT"
chmod $FILE_MODE "$TARGET_ROOT"
TARGET="$TARGET_ROOT/$NAME-$VERSION-$ARCH"
DEB_PATH="$DEPLOY_DIR/"
}
function prep_deb() {
echo "Preparing .deb data..."
mkdir -m $FILE_MODE -p "$TARGET/usr/local/gauge"
cp -r "$PKG_SRC/bin" "$TARGET/usr/local"
cp -r "$PKG_SRC/config" "$TARGET/usr/local/gauge"
mkdir -m $FILE_MODE -p "$TARGET/DEBIAN"
cp "$CONTROL_FILE" "$TARGET/DEBIAN/control"
cp "$POSTINST_FILE" "$TARGET/DEBIAN/postinst"
cp "$GAUGE_SETUP_FILE" "$TARGET/usr/local/bin/gauge_setup"
chmod +x $TARGET/usr/local/bin/*
sync
CONTROL_DATA=$(cat "$TARGET/DEBIAN/control")
INSTALLED_SIZE=$(du -s $PKG_SRC/bin/ | sed "s/^\([0-9]*\).*$/\1/")
while [ $INSTALLED_SIZE -lt 1 ]; do
INSTALLED_SIZE=$(du -s $PKG_SRC/bin/ | sed "s/^\([0-9]*\).*$/\1/")
done
echo "$CONTROL_DATA" | sed "s/<version>/$VERSION/" | sed "s/<arch>/$ARCH/" | sed "s/<size>/$INSTALLED_SIZE/" > "$TARGET/DEBIAN/control"
# Copy the generated LICENSE file to /usr/share/doc/gauge/copyright
mkdir -m $FILE_MODE -p "$TARGET/usr/share/doc/$NAME"
cp "$ROOT/LICENSE" "$TARGET/usr/share/doc/$NAME/copyright"
}
function create_deb() {
echo "Generating .deb..."
fakeroot dpkg-deb -b "$TARGET"
mv "$TARGET_ROOT/$NAME-$VERSION-$ARCH.deb" "$DEB_PATH"
}
function cleanup_temp() {
rm -rf "$TARGET_ROOT"
rm -rf "$PKG_SRC"
}
function print_status() {
echo -e "\nCreated .deb package at: $DEB_PATH$NAME-$VERSION-$ARCH.deb"
echo -e " Version : $VERSION"
echo -e " Arch : $ARCH\n"
}
function init() {
check_and_rebuild
for f in `ls $DEPLOY_DIR/$NAME-*$OS*.$FILE_EXT`; do
clean_stage
pushd $DEPLOY_DIR > /dev/null
set_info "$f"
unzip -q "$PKG" -d "$PKG_SRC"
popd > /dev/null
clean_stage
prep_deb
create_deb
cleanup_temp
print_status
done
}
# Let the game begin
init
|
RabihHallage/gauge
|
build/mkdeb.sh
|
Shell
|
gpl-3.0
| 4,157 |
set -e
set -u
../../bin/dcd-client $1 file.d -c19 > actual1.txt
diff actual1.txt expected1.txt
../../bin/dcd-client $1 file.d -c37 > actual2.txt
diff actual2.txt expected2.txt
|
Hackerpilot/DCD
|
tests/tc_template_param_props/run.sh
|
Shell
|
gpl-3.0
| 178 |
cd ~
touch .netrc
echo "machine github.com login [your-username] password [your-password]" >> .netrc
|
arpan-chavda/my-ubuntu-scripts
|
configs/git_store.sh
|
Shell
|
gpl-3.0
| 101 |
#!/usr/bin/env bash
clean=1 # Delete phpunit.phar after the tests are complete?
aftercmd="php phpunit.phar --bootstrap bootstrap.php src/tests"
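# Note: this command is only executed after the PGP signature of phpunit.phar
# has been verified; see the gpg --verify block near the end of the script.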
gpg --fingerprint D8406D0D82947747293778314AA394086372C20A
if [ $? -ne 0 ]; then
echo -e "\033[33mDownloading PGP Public Key...\033[0m"
gpg --recv-keys D8406D0D82947747293778314AA394086372C20A
# Sebastian Bergmann <[email protected]>
gpg --fingerprint D8406D0D82947747293778314AA394086372C20A
if [ $? -ne 0 ]; then
echo -e "\033[31mCould not download PGP public key for verification\033[0m"
exit 1
fi
fi
if [ "$clean" -eq 1 ]; then
# Let's clean them up, if they exist
if [ -f phpunit.phar ]; then
rm -f phpunit.phar
fi
if [ -f phpunit.phar.asc ]; then
rm -f phpunit.phar.asc
fi
fi
# Let's grab the latest release and its signature
if [ ! -f phpunit.phar ]; then
wget https://phar.phpunit.de/phpunit.phar
fi
if [ ! -f phpunit.phar.asc ]; then
wget https://phar.phpunit.de/phpunit.phar.asc
fi
# Verify before running
gpg --verify phpunit.phar.asc phpunit.phar
if [ $? -eq 0 ]; then
echo
echo -e "\033[33mBegin Unit Testing\033[0m"
# Run the testing suite
eval "$aftercmd"
# Cleanup
if [ "$clean" -eq 1 ]; then
echo -e "\033[32mCleaning Up!\033[0m"
rm -f phpunit.phar
rm -f phpunit.phar.asc
fi
else
echo
chmod -x phpunit.phar
mv phpunit.phar /tmp/bad-phpunit.phar
mv phpunit.phar.asc /tmp/bad-phpunit.phar.asc
echo -e "\033[31mSignature did not match! PHPUnit has been moved to /tmp/bad-phpunit.phar\033[0m"
exit 1
fi
|
lecaoquochung/liho-ubun
|
php/install_phpunit.sh
|
Shell
|
gpl-3.0
| 1,625 |
#!/bin/bash
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x example_workflow-averageratioevent_0-1.0
echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
set +e
pegasus-kickstart -n example_workflow::averageratioevent_0:1.0 -N ID0000024 -R condorpool -L example_workflow -T 2016-11-08T20:46:03+00:00 ./example_workflow-averageratioevent_0-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 1A/instances/10_1_workflow_full_10files_secondary_w1_3sh_3rs_with_annot_with_proj_3s_range/dags/ubuntu/pegasus/example_workflow/20161108T204604+0000/00/00/averageratioevent_0_ID0000024.sh
|
Shell
|
gpl-3.0
| 1,261 |
#!/bin/sh
if [ -d "./workspace/test_base/" ]; then
echo "The /worspace/test_base/ directory exists. Please remove (or backup before continuing)"
exit 0
fi
./1_from_csv.sh test_base.csv test_base
iter=100
./3_batch_from_dir.sh test_base $iter
|
robertour/CulSim
|
experiments/genocide/test/test_experiment.sh
|
Shell
|
gpl-3.0
| 252 |
#!/usr/bin/env bash
set -x
readonly BOT_ROOT=/b
readonly BOT_ROOT_NAME=$1
readonly BOT_PASS=$2
#pushd /tmp
#curl -sSO https://dl.google.com/cloudagents/install-monitoring-agent.sh
#bash install-monitoring-agent.sh
#curl -sSO https://dl.google.com/cloudagents/install-logging-agent.sh
#bash install-logging-agent.sh --structured
#popd
apt-get update -y
apt-get upgrade -y
apt-get install sudo -y
# FIXME(EricWF): Remove this hack. It's only in place to temporarily fix linking libclang_rt from the
# debian packages.
# WARNING: If you're not a buildbot, DO NOT RUN!
apt-get install -y lld-9
rm /usr/bin/ld
ln -s /usr/bin/lld-9 /usr/bin/ld
systemctl set-property buildslave.service TasksMax=100000
function setup_numbered_bot() {
local BOT_NAME=$1
local BOT_DIR=$2
mkdir -p $BOT_DIR
buildslave stop $BOT_DIR
chown buildbot:buildbot $BOT_DIR
rm -rf $BOT_DIR/*
buildslave create-slave --allow-shutdown=signal "$BOT_DIR" "lab.llvm.org:9990" "$BOT_NAME" "$BOT_PASS"
echo "Eric Fiselier <[email protected]>" > $BOT_DIR/info/admin
echo "Connecting as $1"
{
uname -a | head -n1
cmake --version | head -n1
g++ --version | head -n1
ld --version | head -n1
date
lscpu
} > $BOT_DIR/info/host
#echo "SLAVE_RUNNER=/usr/bin/buildslave
#SLAVE_ENABLED[1]=\"1\"
#SLAVE_NAME[1]=\"$BOT_NAME\"
#SLAVE_USER[1]=\"buildbot\"
#SLAVE_BASEDIR[1]=\"$BOT_DIR\"
#SLAVE_OPTIONS[1]=\"\"
#SLAVE_PREFIXCMD[1]=\"\"" > $BOT_DIR/buildslave.cfg
ls $BOT_DIR/
cat $BOT_DIR/buildbot.tac
}
function try_start_builder {
local N=$1
local BOT_DIR="$BOT_ROOT/b$N"
local BOT_NAME="$BOT_ROOT_NAME$N"
systemctl daemon-reload
service buildslave restart
setup_numbered_bot "$BOT_NAME" "$BOT_DIR"
systemctl daemon-reload
service buildslave restart
chown -R buildbot:buildbot $BOT_DIR/
sudo -u buildbot /usr/bin/buildslave start $BOT_DIR/
sleep 30
cat $BOT_DIR/twistd.log
if grep --quiet "slave is ready" $BOT_DIR/twistd.log; then
return 0
fi
if grep --quiet "configuration update complete" $BOT_DIR/twistd.log; then
return 0
fi
if grep "rejecting duplicate slave" $BOT_DIR/twistd.log; then
return 1
fi
echo "Unknown error"
cat $BOT_DIR/twistd.log
exit 1
}
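# Pick candidate bot numbers in random order and claim the first free slot;
# only give up (and shut the instance down) once every candidate is taken.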
STARTED=false
for N in `shuf -i 1-5`
do
if try_start_builder $N; then
STARTED=true
break
fi
done
if ! $STARTED; then
echo "failed to start any buildbot"
shutdown now
fi
# GCE can restart instance after 24h in the middle of the build.
# Gracefully restart before that happen.
sleep 72000
while pkill -SIGHUP buildslave; do sleep 5; done;
shutdown now
|
r0mai/metashell
|
3rd/templight/libcxx/utils/docker/scripts/run_buildbot.sh
|
Shell
|
gpl-3.0
| 2,538 |
#!/bin/bash
# generate crt/csr/key/pem pair with openssl
OUT_NAME="vpn_cert"
OUT_PASSLESS_NAME="new_vpn_cert"
OUT_DIR="certs/"
OUT_DAYS=365
OUT_CRYPTOLEVEL=4096
OUT_CRYPTO="des3"
#create directory automagically if not exists
if [ ! -d "$OUT_DIR" ]
then
echo "we are creating the directory $OUT_DIR because it doesn't exist..."
mkdir "$OUT_DIR"
fi
#first the key
openssl genrsa -${OUT_CRYPTO} -out ${OUT_DIR}ssl_${OUT_NAME}.key ${OUT_CRYPTOLEVEL}
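# Note: the -des3 option encrypts the private key, so openssl prompts for a
# passphrase here; the passphrase-free copy is written further below.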
#secondly the csr
openssl req -new -key ${OUT_DIR}ssl_${OUT_NAME}.key -out ${OUT_DIR}ssl_${OUT_NAME}.csr
#third part is the self signed cert, disable this if you have key auth stuff going...
openssl x509 -req -days ${OUT_DAYS} -in ${OUT_DIR}ssl_${OUT_NAME}.csr -signkey ${OUT_DIR}ssl_${OUT_NAME}.key -out ${OUT_DIR}ssl_${OUT_NAME}.crt
#store a new key without password so one may start webserver without keyphrase
openssl rsa -in ${OUT_DIR}ssl_${OUT_NAME}.key -out ${OUT_DIR}${OUT_PASSLESS_NAME}.key
#and finally the reduced .pem file for apache2 etc...
cat ${OUT_DIR}ssl_${OUT_NAME}.crt | openssl x509 > ${OUT_DIR}ssl_${OUT_NAME}.pem
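# Optional sanity check (editor sketch): the key matches the certificate when
# both report the same public-key modulus.
# openssl x509 -noout -modulus -in ${OUT_DIR}ssl_${OUT_NAME}.crt | sha512sum
# openssl rsa -noout -modulus -in ${OUT_DIR}ssl_${OUT_NAME}.key | sha512sum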
#now we need a crl for vpn...
openssl ca -gencrl -keyfile ${OUT_DIR}${OUT_PASSLESS_NAME}.key -cert ${OUT_DIR}ssl_${OUT_NAME}.crt -out ${OUT_DIR}ssl_${OUT_NAME}.crl.pem
openssl crl -inform PEM -in ${OUT_DIR}ssl_${OUT_NAME}.crl.pem -outform DER -out ${OUT_DIR}ssl_${OUT_NAME}.crl
#generating sha checksums for the generated cert files.
sha512sum ${OUT_DIR}ssl_${OUT_NAME}.key > ${OUT_DIR}ssl_${OUT_NAME}.sums
sha512sum ${OUT_DIR}ssl_${OUT_NAME}.csr >> ${OUT_DIR}ssl_${OUT_NAME}.sums
sha512sum ${OUT_DIR}ssl_${OUT_NAME}.crt >> ${OUT_DIR}ssl_${OUT_NAME}.sums
sha512sum ${OUT_DIR}ssl_${OUT_NAME}.pem >> ${OUT_DIR}ssl_${OUT_NAME}.sums
sha512sum ${OUT_DIR}ssl_${OUT_NAME}.crl.pem >> ${OUT_DIR}ssl_${OUT_NAME}.sums
sha512sum ${OUT_DIR}ssl_${OUT_NAME}.crl >> ${OUT_DIR}ssl_${OUT_NAME}.sums
#flush memory after cert generation...
#sync; echo 3 > /proc/sys/vm/drop_caches
exit 0
|
OliverLeitner/scripting
|
gencert.sh
|
Shell
|
gpl-3.0
| 1,971 |
python3 model_main.py \
--pipeline_config_path=training/ssd_mobilenet_v2_coco.config \
--model_dir=training \
--num_train_steps=15000 \
--num_eval_steps=2000 \
--alsologtostderr
|
longlong2010/computer-vision
|
object-detection/run.sh
|
Shell
|
gpl-3.0
| 199 |
#!/bin/sh
exec tail -f \
/var/log/kern.log \
/var/log/syslog \
/var/log/mysql.log \
/var/log/messages \
/var/log/postgresql/postgresql-9.4-main.log
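# exec replaces this shell with tail, so no extra process lingers and signals
# such as SIGINT go straight to tail.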
|
heliogabalo/The-side-of-the-source
|
Codigo/Scripts/Logs/logs_monitor.sh
|
Shell
|
mpl-2.0
| 159 |
#!/bin/sh
set -e
set -u
set -o pipefail
if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
# If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
# frameworks to, so exit 0 (signalling the script phase was successful).
exit 0
fi
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies and strips a vendored dSYM
install_dsym() {
local source="$1"
if [ -r "$source" ]; then
# Copy the dSYM into a the targets temp dir.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
local basename
basename="$(basename -s .framework.dSYM "$source")"
binary="${DERIVED_FILES_DIR}/${basename}.framework.dSYM/Contents/Resources/DWARF/${basename}"
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"Mach-O dSYM companion"* ]]; then
strip_invalid_archs "$binary"
fi
if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
# Move the stripped file into its final destination.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.framework.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
else
# The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.framework.dSYM"
fi
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current target binary
binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
# Intersect them with the architectures we are building for
intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
# If there are no archs supported by this binary then warn the user
if [[ -z "$intersected_archs" ]]; then
echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
STRIP_BINARY_RETVAL=0
return
fi
stripped=""
for arch in $binary_archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
STRIP_BINARY_RETVAL=1
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/Device.swift/Device_swift.framework"
install_framework "${BUILT_PRODUCTS_DIR}/GoogleToolboxForMac/GoogleToolboxForMac.framework"
install_framework "${BUILT_PRODUCTS_DIR}/IQKeyboardManagerSwift/IQKeyboardManagerSwift.framework"
install_framework "${BUILT_PRODUCTS_DIR}/Kingfisher/Kingfisher.framework"
install_framework "${BUILT_PRODUCTS_DIR}/R.swift.Library/Rswift.framework"
fi
if [[ "$CONFIGURATION" == "Production" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/Device.swift/Device_swift.framework"
install_framework "${BUILT_PRODUCTS_DIR}/GoogleToolboxForMac/GoogleToolboxForMac.framework"
install_framework "${BUILT_PRODUCTS_DIR}/IQKeyboardManagerSwift/IQKeyboardManagerSwift.framework"
install_framework "${BUILT_PRODUCTS_DIR}/Kingfisher/Kingfisher.framework"
install_framework "${BUILT_PRODUCTS_DIR}/R.swift.Library/Rswift.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
yonadev/yona-app-ios
|
Yona/Pods/Target Support Files/Pods-Yona/Pods-Yona-frameworks.sh
|
Shell
|
mpl-2.0
| 7,708 |
#!/bin/bash
set -e
[ "$#" -ge 2 ] || { echo Usage: $0 model_family model_version >&2; exit 1; }
family="$1"
version="$2"
dir="udpipe-$family-$version"
[ -d "$dir" ] && { echo Release $dir already exists >&2; exit 1; }
mkdir "$dir"
cp LICENSE.CC-BY-NC-SA-4 "$dir"/LICENSE
make -C ../doc manual_model_${family}_readme.{txt,html}
cp ../doc/manual_model_${family}_readme.txt "$dir"/README
cp ../doc/manual_model_${family}_readme.html "$dir"/README.html
make -C ../doc clean
for model in ../training/models-$family/*/*.model; do
lang=`basename $model .model`
long_name=`awk "/^$lang /{print \\\$2}" models.txt`
[ -z "$long_name" ] && { echo Unknown language code $lang >&2; exit 1; }
ln -s ../$model $dir/$long_name-$family-$version.udpipe
done
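# Example invocation (editor sketch; the family/version values are hypothetical):
#   ./release-model.sh ud-2.0 170801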
|
ufal/udpipe
|
releases/release-model.sh
|
Shell
|
mpl-2.0
| 755 |
#!/bin/sh
# We recommend you run this as a pre-push hook: to reduce
# review turn-around time, we want all pushes to run tests
# locally. Using this hook will guarantee your hook gets
# updated as the repository changes.
#
# This hook tries to run as much as possible without taking
# too long.
#
# You can use it by running this command from the project root:
# `ln -s ../../quality/pre-push-recommended.sh .git/hooks/pre-push`
# Descriptions for each gradle task below can be found in the
# output of `./gradlew tasks`.
./gradlew -q \
ktlint \
detekt \
assembleFocusDebugAndroidTest \
testFocusDebugUnitTest
# Tasks omitted because they take a long time to run:
# - unit test on all variants
# - UI tests
# - lint (compiles all variants)
|
mozilla-mobile/focus-android
|
quality/pre-push-recommended.sh
|
Shell
|
mpl-2.0
| 775 |
#!/bin/bash
echo Creating OpenMotics directory
mkdir -p /opt/openmotics/bin
mkdir -p /opt/openmotics/etc
mkdir -p /opt/openmotics/download
echo Copy OpenMotics software
cp -R python /opt/openmotics/
cp -R Updater /opt/openmotics/
cp -R static /opt/openmotics/
## Copy the bootloader
cp binaries/AN1310cl /opt/openmotics/bin/
cp Bootloader/devices.db /opt/openmotics/bin/
cp binaries/updateController.sh /opt/openmotics/bin
## TODO Place a copy of the hex file on the gateway
touch /opt/openmotics/firmware.hex
## Configure beaglebone ports at boot
cat << EOF > /opt/openmotics/bin/configure_ports.sh
#!/bin/bash
# UART 1
echo 20 > /sys/kernel/debug/omap_mux/uart1_rxd
echo 0 > /sys/kernel/debug/omap_mux/uart1_txd
# UART 2
echo 21 > /sys/kernel/debug/omap_mux/spi0_sclk
echo 1 > /sys/kernel/debug/omap_mux/spi0_d0
# UART 4
echo 26 > /sys/kernel/debug/omap_mux/gpmc_wait0
echo 6 > /sys/kernel/debug/omap_mux/gpmc_wpn
echo 6 > /sys/kernel/debug/omap_mux/lcd_data13
# UART 5
echo 24 > /sys/kernel/debug/omap_mux/lcd_data9
echo 4 > /sys/kernel/debug/omap_mux/lcd_data8
# OpenMotics home LED
echo 7 > /sys/kernel/debug/omap_mux/lcd_data5
echo 75 > /sys/class/gpio/export
echo out > /sys/class/gpio/gpio75/direction
echo 1 > /sys/class/gpio/gpio75/value
# Ethernet LEDs
for i in 48 49 60 117;
do
echo \$i > /sys/class/gpio/export
echo out > /sys/class/gpio/gpio\${i}/direction
echo 0 > /sys/class/gpio/gpio\${i}/value
done
# Input button
echo 38 > /sys/class/gpio/export
echo in > /sys/class/gpio/gpio38/direction
echo 26 > /sys/class/gpio/export
echo in > /sys/class/gpio/gpio26/direction
# Master reset output
echo 44 > /sys/class/gpio/export
EOF
chmod +x /opt/openmotics/bin/configure_ports.sh
mount -o remount,rw /
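# The root filesystem on this image is normally read-only; it is remounted
# read-write here and switched back to read-only near the end of the script.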
cat << EOF > /etc/supervisor/conf.d/configure_ports.conf
[program:configure_ports]
command=/opt/openmotics/bin/configure_ports.sh
autostart=true
autorestart=false
directory=/opt/openmotics/bin/
startsecs=0
exitcodes=0
priority=1
EOF
## Install VPN service
cat << EOF > /etc/supervisor/conf.d/vpn_service.conf
[program:vpn_service]
command=python vpn_service.py
autostart=true
autorestart=true
directory=/opt/openmotics/python
startsecs=10
EOF
## Install OpenVPN service
cat << EOF > /lib/systemd/system/openvpn.service
[Unit]
Description=OpenVPN connection to the OpenMotics cloud
[Service]
ExecStart=/usr/local/sbin/openvpn --config /etc/openvpn/vpn.conf
Restart=always
WorkingDirectory=/etc/openvpn
[Install]
WantedBy=multi-user.target
EOF
ln -s /lib/systemd/system/openvpn.service /lib/systemd/system/multi-user.target.wants/openvpn.service
## Install Openmotics service
cat << EOF > /etc/supervisor/conf.d/openmotics.conf
[program:openmotics]
command=python openmotics_service.py
autostart=true
autorestart=true
directory=/opt/openmotics/python
startsecs=10
EOF
## Install LED service
cat << EOF > /etc/supervisor/conf.d/led_service.conf
[program:led_service]
command=python led_service.py
autostart=true
autorestart=true
directory=/opt/openmotics/python
startsecs=10
priority=1
EOF
## Install watchdog
cat << EOF > /etc/supervisor/conf.d/watchdog.conf
[program:watchdog]
command=python /opt/openmotics/python/watchdog.py
autostart=true
autorestart=false
EOF
## Install Status service to control the LEDs
cat << EOF > /etc/dbus-1/system.d/com.openmotics.status.conf
<!DOCTYPE busconfig PUBLIC
"-//freedesktop//DTD D-BUS Bus Configuration 1.0//EN"
"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
<busconfig>
<policy user="root">
<allow own="com.openmotics.status"/>
</policy>
<policy context="default">
<allow send_destination="com.openmotics.status"/>
<allow receive_sender="com.openmotics.status"/>
</policy>
<policy user="root">
<allow send_destination="com.openmotics.status"/>
<allow receive_sender="com.openmotics.status"/>
</policy>
</busconfig>
EOF
rm /etc/localtime
cp /usr/share/zoneinfo/UTC /opt/openmotics/etc/timezone
ln -s /opt/openmotics/etc/timezone /etc/localtime
mount -o remount,ro /
echo OpenMotics installed successfully
|
openmotics/gateway
|
tools/installGateway.sh
|
Shell
|
agpl-3.0
| 4,066 |
#!/bin/bash
function step() { echo -e '\033[34;1m'"$*"'\033[0m'; }
function error() { echo -e '\033[31;1m'"$*"'\033[0m'; }
function warn() { echo -e '\033[33m'"$*"'\033[0m'; }
function fail() { error "$*"; exit 1; }
function run() { echo -e '\033[35;1m'Running: '\033[33;1m'"$*"'\033[0m'; "$@"; }
ALCOLEA_COUNTER_SERIAL='501600324'
ALCOLEA_START_DATE='2016-05-01'
FONTIVSOLAR_TOTAL_SHARES=8570 # Computed by plant team
FONTIVSOLAR_COUNTER_SERIAL='68308479'
FONTIVSOLAR_START_DATE='2019-02-20'
step "Current state"
run scripts/genkwh_plants.py list
step "Renaming Alcolea meter"
run scripts/genkwh_plants.py editmeter \
GenerationkWh Alcolea "1" name "$ALCOLEA_COUNTER_SERIAL" ||
fail "Unable to change the name"
step "Setting Alcolea start date"
run scripts/genkwh_plants.py editplant \
GenerationkWh Alcolea first_active_date "$ALCOLEA_START_DATE" ||
fail "Unable to set Alcolea start date"
step "Adding the new plant"
run scripts/genkwh_plants.py addplant \
GenerationkWh Fontivsolar Fontivsolar "$FONTIVSOLAR_TOTAL_SHARES" ||
fail "Unable to add the new Fontivsolar plant"
step "Adding the plant meter"
run scripts/genkwh_plants.py addmeter \
GenerationkWh Fontivsolar "$FONTIVSOLAR_COUNTER_SERIAL" \
"Fontivsolar main meter" "$FONTIVSOLAR_START_DATE" ||
fail "Unable to add the meter for Fontivsolar plant"
step "Enabling new plant"
run scripts/genkwh_plants.py editplant GenerationkWh Fontivsolar enabled '1' ||
fail "Unable to enable the new plant"
step "Enabling new meter"
run scripts/genkwh_plants.py editmeter GenerationkWh Fontivsolar "$FONTIVSOLAR_COUNTER_SERIAL" enabled '1' ||
fail "Unable to enable the new meter"
step "Setting Fontivsolar plant start date"
run scripts/genkwh_plants.py editplant GenerationkWh Fontivsolar first_active_date "$FONTIVSOLAR_START_DATE" ||
fail "Unable to set Fontivsolar start date"
step "Setting Fontivsolar meter start date"
run scripts/genkwh_plants.py editmeter \
GenerationkWh Fontivsolar "$FONTIVSOLAR_COUNTER_SERIAL" \
first_active_date "$FONTIVSOLAR_START_DATE" ||
fail "Unable to set meter first date"
step "Resulting state"
run scripts/genkwh_plants.py list
|
Som-Energia/somenergia-generationkwh
|
scripts/genkwh_migrate_plantmeter_1_7_0_newplant.sh
|
Shell
|
agpl-3.0
| 2,211 |
#!/bin/bash
export OLD_PATH="$PATH"
export PATH="$PATH:$HOME/liteide/bin"
export GOPATH=$(pwd)
export GOBIN=$GOPATH/bin
export OLD_PS1="$PS1"
export PS1="(go $(basename $(pwd)))$PS1"
alias deactivate="unset GOPATH; unset GOBIN; unalias deactivate; export PS1=\"$OLD_PS1\"; unset OLD_PS1; export PATH=\"$OLD_PATH\"; unset OLD_PATH"
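# Usage (editor sketch): source this file so the exports apply to the current
# shell, then run `deactivate` to restore the previous environment:
#   . ./goenv.sh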
|
diegohce/camerata
|
goenv.sh
|
Shell
|
agpl-3.0
| 338 |
#lite -es6 -run dns
lite -d -v 2 -compile simple
|
luciotato/LiteScript
|
devel/util/tests/v0.6/soft-tabs/build.sh
|
Shell
|
agpl-3.0
| 48 |