code
stringlengths 2
1.05M
| repo_name
stringlengths 5
110
| path
stringlengths 3
922
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 2
1.05M
|
---|---|---|---|---|---|
#! /bin/bash
# Author : TiWim
# Date : 17 / 07 / 15
# Description : Pause/play switcher for vlc or any other media player
# Use : insert a line in your configs to bind it to a key combination
media="vlc"
# pidof may print several PIDs (or none); take the first and bail out
# early instead of signalling an empty PID.
pid=$(pidof "$media" | awk '{print $1}')
if [[ -z "$pid" ]]; then
    echo "$media is not running"
    exit 1
fi
# Query the state of that single PID directly ('T' = stopped by SIGSTOP)
# instead of grepping the whole process table; the original exact match
# on 'Sl' missed states such as 'Sl+' or plain 'S'.
state=$(ps -o stat= -p "$pid")
if [[ $state != T* ]]
then
    echo stopping $media
    kill -STOP "$pid"
else
    echo launching $media
    kill -CONT "$pid"
fi
exit 0
|
TiWim/packetList
|
scripts/soundswitcher.sh
|
Shell
|
gpl-2.0
| 455 |
#!/bin/sh
# Dump one loadavg field for a system over a time window as
# "unix_timestamp:value" lines, via the sdc MySQL database.
system="$1"
start="$2"
end="$3"
field="$4"
# Build the query in a named variable, then feed it to mysql in batch mode
# and reformat the tab-separated output with a colon separator.
sql="SELECT UNIX_TIMESTAMP(creation) AS time, data_$field FROM loadavg WHERE system='$system' AND UNIX_TIMESTAMP(creation) > '$start' AND UNIX_TIMESTAMP(creation) <= '$end' ORDER BY row_id;"
printf '%s\n' "$sql" \
  | mysql --batch --delimiter=: --skip-column-names sdc \
  | awk '{print $1 ":" $2}'
|
jheusala/sdc
|
rrdtool/db-query-loadavg.sh
|
Shell
|
gpl-2.0
| 330 |
#!/bin/bash
# Regenerate this test repository's metadata, then delete everything
# except the primary XML files (the repo intentionally ships only
# primary metadata).
MY_DIR=$(dirname "$0")
# The createrepo binary can be overridden via $CREATEREPO.
if [[ -z "$CREATEREPO" ]]
then
    CREATEREPO="createrepo_c"
fi
pushd "$MY_DIR"
# $EXTRAARGS is deliberately unquoted so it word-splits into options.
$CREATEREPO $EXTRAARGS --pkglist pkglist --revision foorevisionbar --distro cpe:/o:fedoraproject:fedora:17,foo --repo abc --content plm .
rm repodata/*filelists.sqlite*
rm repodata/*other.sqlite*
rm repodata/*filelists.xml*
rm repodata/*other.xml*
rm repodata/*primary.sqlite*
popd
|
Tojaj/DeltaRepo
|
acceptance_tests/repos/repo2_only_pri/gen.sh
|
Shell
|
gpl-2.0
| 396 |
# Slice the page scans OCS-630-A/B into per-entry strips with ImageMagick.
# -crop geometry is WxH+X+Y: fixed width, per-entry vertical offsets;
# +repage resets the virtual canvas so each strip starts at 0,0.
# The first strip of page A belongs to the last entry of the previous
# page, so it is appended to OCS-629-B-3 and the temporary strip removed.
convert images/OCS-630-A.png -crop 1551x471+0+0 +repage images/OCS-630-A-0.png
convert -append images/OCS-629-B-3.png images/OCS-630-A-0.png images/OCS-629-B-3.png
rm images/OCS-630-A-0.png
convert images/OCS-630-A.png -crop 1551x792+0+474 +repage images/OCS-630-A-1.png
convert images/OCS-630-A.png -crop 1551x959+0+1261 +repage images/OCS-630-A-2.png
convert images/OCS-630-A.png -crop 1551x634+0+2215 +repage images/OCS-630-A-3.png
convert images/OCS-630-A.png -crop 1551x1667+0+2854 +repage images/OCS-630-A-4.png
#
#/OCS-630.png
# Page B: its first strip continues the last entry of page A, so it is
# appended to OCS-630-A-4 before the remaining entries are cropped.
convert images/OCS-630-B.png -crop 1547x157+0+0 +repage images/OCS-630-B-0.png
convert -append images/OCS-630-A-4.png images/OCS-630-B-0.png images/OCS-630-A-4.png
rm images/OCS-630-B-0.png
convert images/OCS-630-B.png -crop 1547x135+0+162 +repage images/OCS-630-B-1.png
convert images/OCS-630-B.png -crop 1547x313+0+320 +repage images/OCS-630-B-2.png
convert images/OCS-630-B.png -crop 1547x476+0+638 +repage images/OCS-630-B-3.png
convert images/OCS-630-B.png -crop 1547x322+0+1109 +repage images/OCS-630-B-4.png
convert images/OCS-630-B.png -crop 1547x57+0+1446 +repage images/OCS-630-B-5.png
convert images/OCS-630-B.png -crop 1547x293+0+1514 +repage images/OCS-630-B-6.png
convert images/OCS-630-B.png -crop 1547x313+0+1828 +repage images/OCS-630-B-7.png
convert images/OCS-630-B.png -crop 1547x629+0+2148 +repage images/OCS-630-B-8.png
convert images/OCS-630-B.png -crop 1547x635+0+2780 +repage images/OCS-630-B-9.png
convert images/OCS-630-B.png -crop 1547x73+0+3420 +repage images/OCS-630-B-10.png
convert images/OCS-630-B.png -crop 1547x631+0+3500 +repage images/OCS-630-B-11.png
convert images/OCS-630-B.png -crop 1547x131+0+4136 +repage images/OCS-630-B-12.png
convert images/OCS-630-B.png -crop 1547x233+0+4294 +repage images/OCS-630-B-13.png
#
#/OCS-630.png
|
jonnymwalker/Staroslavjanskij-Slovar
|
scripts/findindents.OCS-630.sh
|
Shell
|
gpl-2.0
| 1,805 |
#!/bin/bash
# Start a tmux session "sys" with a window "Tmux-Log" holding three panes
# stacked top-to-bottom with equal heights: /var/log/messages,
# /var/log/syslog, and an interactive root shell.
tmux new-session -n Tmux-Log -s sys \
'sudo tail -f /var/log/messages' \; \
split-window 'sudo tail -f /var/log/syslog' \; \
split-window 'sudo -i' \; \
select-layout even-vertical
|
architek/myfiles
|
files/bin/tmux_logs.sh
|
Shell
|
gpl-2.0
| 222 |
#!/bin/bash
# Run WH_test under 7 MPI ranks for each parameter 1.1e8 .. 1.9e8.
# The outer loop covers a single exponent (8) but is kept so more
# magnitudes can be swept by widening its range.
for expo in 8; do
  for numproc in {1..9}; do
    mpirun -np 7 ./WH_test 1.${numproc}e${expo}
  done
done
|
kel85uk/WH_Cpp
|
runall.sh
|
Shell
|
gpl-2.0
| 144 |
#!/bin/sh
# fix_stereodemux.sh
# LocalRadio
#
# Created by Douglas Ward on 7/28/18.
# Copyright © 2017-2020 ArkPhone LLC. All rights reserved.
# Fix libliquid and libsndfile dylibs to load properly for stereodemux
EXECFILE=${BUILT_PRODUCTS_DIR}/${EXECUTABLE_PATH}
EXECFOLDER=${BUILT_PRODUCTS_DIR}/${EXECUTABLE_FOLDER_PATH}
LIBPATH=${BUILT_PRODUCTS_DIR}/${FRAMEWORKS_FOLDER_PATH}
NEWLIBPATH="@executable_path/../Frameworks"
echo "Modify executable_path to libliquid and libsndfile in stereodemux"
# Rewrite one MacPorts dylib reference in stereodemux to the bundled
# Frameworks copy, echoing the command before running it.  Paths are
# quoted because Xcode build directories may contain spaces.
retarget_dylib()
{
    echo install_name_tool -change "$1" "$2" "${EXECFOLDER}/stereodemux"
    install_name_tool -change "$1" "$2" "${EXECFOLDER}/stereodemux"
}
retarget_dylib /opt/local/lib/libliquid.dylib @executable_path/../Frameworks/libliquid.dylib
retarget_dylib /opt/local/lib/libsndfile.1.dylib @executable_path/../Frameworks/libsndfile.1.dylib
|
dsward2/LocalRadio
|
LocalRadio/Scripts/fix_stereodemux.sh
|
Shell
|
gpl-2.0
| 1,060 |
#!/system/bin/sh
# Android init helper: cap the CPU0 scaling frequency, either to the
# hardware maximum or to the value of the persist.maxcpukhz property.
# Copyright (c) 2009, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
target=`getprop ro.product.device`
case "$target" in
# 2010.01.12 Yoshikado: changed to match all devices (was limited to
# the qsd8250_surf / qsd8250_ffa targets below).
# "qsd8250_surf" | "qsd8250_ffa")
*)
# If persist.maxcpukhz is unset, mirror the hardware maximum into
# scaling_max_freq; otherwise use the property's value.
value=`getprop persist.maxcpukhz`
case "$value" in
"")
cat /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq >\
/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
;;
*)
echo $value > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
;;
esac
;;
esac
|
PyYoshi/is01_froyo_kernel
|
arch/arm/boot/mkbootimg/root/init.qcom.sh
|
Shell
|
gpl-2.0
| 2,149 |
#!/bin/sh
# Copy xusb file to initrd
#
mkdir -p "${DESTDIR}"/lib/firmware/nvidia/tegra210
# New-style firmware location.
src_new=/lib/firmware/nvidia/tegra210/xusb.bin
if [ -f "${src_new}" ]; then
cp "${src_new}" "${DESTDIR}"/lib/firmware/nvidia/tegra210
fi
# Legacy firmware location.
src_old=/lib/firmware/tegra21x_xusb_firmware
if [ -f "${src_old}" ]; then
cp "${src_old}" "${DESTDIR}"/lib/firmware
fi
exit 0
|
armbian/build
|
packages/blobs/jetson/jetson.sh
|
Shell
|
gpl-2.0
| 359 |
# Uninstaller driver: run from the pub-res dir against a project dir
# given as $1.  It re-runs the project's pre-checks, rebuilds the wrapped
# script (to learn its install name/location), resolves jail/prefix
# overrides, then deletes the installed file and runs man-uninstall.pl.
pubresdir="$(cd "$(dirname "${0}")" && pwd)"
cd "${1}" || exit
curdirec="$(pwd)"
# Defaults; may be overridden below by find-above.pl lookups.
CHOBAK_INSTALL_PREFIX="${HOME}"
export CHOBAK_INSTALL_PREFIX
CHOBAK_INSTALL_JAIL=""
export CHOBAK_INSTALL_JAIL
rm -rf tmp
mkdir -p tmp
# The following litany is in case any package has its
# own tests that must be run before install. This feature
# was added so that an administrator could disable
# installations of grunt tools that are installed with
# chorebox in the event that zie has zir own implementation
# installed from elsewhere.
rm -rf tmp/checkret.txt
# check-before-install.pl signals failure by writing its exit code to
# tmp/checkret.txt rather than via its own exit status.
if [ -f "check-before-install.pl" ]; then
perl "check-before-install.pl"
if [ -f "tmp/checkret.txt" ]; then
(
echo "Failed to install from: ${curdirec}:"
) 1>&2
exit "$(cat tmp/checkret.txt)"
fi
fi
# Identify the project type
projtype='cmd'
if [ -f "proj-info/project-type.txt" ]; then
projtype="$(cat proj-info/project-type.txt)"
else
if [ -f "proj-info/proj-name.txt" ]; then
echo cmd > proj-info/project-type.txt
else
(
echo
echo "PLEASE CREATE:"
echo " ${curdirec}/proj-info/project-type.txt"
echo
echo "Possible values:"
echo " cmd scrip-tll"
echo
) 1>&2
exit 8
fi
fi
if [ -f "proj-info/proj-name.txt" ]; then
fildesnom="$(cat "proj-info/proj-name.txt")"
echo "Project Identified: ${fildesnom}:"
else
(
echo
echo PROJECT NAME NOT FOUND
echo "$(pwd)/proj-info/proj-name.txt"
echo
) 1>&2
exit 3
fi
rm -rf tmp
mkdir -p tmp
rm -rf tmp/checkret.txt
perl "${pubresdir}/ins-pl/checkbefore.pl"
if [ -f "tmp/checkret.txt" ]; then
(
echo "Failed to install from: ${curdirec}:"
) 1>&2
exit "$(cat tmp/checkret.txt)"
fi
if [ -f "Makefile" ]; then
make build/all || exit
# Central target is 'build/all' rather than 'all' so that (if need-be)
# the file can actually be -created- without being tracked.
fi
# Rebuild the wrapped perl script in tmp/ exactly as install.sh does,
# so the installed file name and a syntax check are available.
virtual_curdirec="$(perl "${pubresdir}/ins-pl/virtual-cur-dir.pl" "${curdirec}")"
(
echo "#! $(which perl)"
echo "use strict;"
#echo "my \$resloc = \"${curdirec}\";"
echo "my \$resloc = \"${virtual_curdirec}\";"
#echo "# line 1 \"${pubresdir}/outer-wrap.qpl-then\""
echo "# line 1 \"${pubresdir}/outer-wrap.qpl-then\""
cat "${pubresdir}/outer-wrap.qpl"
) > tmp/${fildesnom}
chmod 755 tmp/${fildesnom}
perl -c tmp/${fildesnom} || exit 2
# Resolve Jails and Prefixes
foundo="$(perl "${pubresdir}/find-above.pl" ins-opt-code/dir-of-install-jail.txt x install.sh)"
if [ "$foundo" != "x" ]; then
CHOBAK_INSTALL_JAIL="$(cat "${foundo}")"
export CHOBAK_INSTALL_JAIL
CHOBAK_INSTALL_PREFIX="/usr/local"
export CHOBAK_INSTALL_PREFIX
fi
foundo="$(perl "${pubresdir}/find-above.pl" ins-opt-code/dir-of-install-prefix.txt x install.sh)"
if [ "$foundo" != "x" ]; then
CHOBAK_INSTALL_PREFIX="$(cat "${foundo}")"
export CHOBAK_INSTALL_PREFIX
fi
# Find the path-install location
# NOTE(review): the [ $projtype = $onetype ] tests below leave both
# operands unquoted; a project-type.txt containing spaces breaks them.
destina='x'
onetype='cmd'
if [ $projtype = $onetype ]; then
destina="${CHOBAK_INSTALL_PREFIX}/bin"
# Allow overriding of default:
#if [ -f "ins-opt-code/dir-of-install.txt" ]; then
foundo="$(perl "${pubresdir}/find-above.pl" ins-opt-code/dir-of-install.txt x install.sh)"
if [ "$foundo" != "x" ]; then
destina="$(cat "${foundo}")"
fi
fi
onetype='scrip-tll'
if [ $projtype = $onetype ]; then
destina="${CHOBAK_INSTALL_PREFIX}/scriptools"
foundo="$(perl "${pubresdir}/find-above.pl" ins-opt-code/dir-of-install-scrip-tll.txt x install.sh)"
if [ "$foundo" != "x" ]; then
destina="$(cat "${foundo}")"
fi
fi
# An unknown project type leaves destina='x'; this only warns, so the
# rm below would then target "${CHOBAK_INSTALL_JAIL}x/${fildesnom}".
onetype='x'
if [ $destina = $onetype ]; then
(
echo
echo "Could not find project-type: ${projtype}:"
echo
) 1>&2
fi
#perl "${pubresdir}/ins-pl/diffcp.pl" "tmp/${fildesnom}" "${CHOBAK_INSTALL_JAIL}${destina}/."
#chmod 755 "${CHOBAK_INSTALL_JAIL}${destina}/${fildesnom}"
rm -rf "${CHOBAK_INSTALL_JAIL}${destina}/${fildesnom}"
perl "${pubresdir}/man-uninstall.pl" "${pubresdir}"
rm -rf tmp
# NOTE(review): exec replaces this shell, so when after-uninstall.sh
# exists the MY_DESTINA_BIN export and extra-uninstall.sh below never
# run — confirm whether that is intentional.
if [ -f "after-uninstall.sh" ]; then
exec sh after-uninstall.sh
fi
# Prepare environment for extra compilation instructions:
MY_DESTINA_BIN="${destina}"
export MY_DESTINA_BIN
if [ -f "extra-uninstall.sh" ]; then
exec sh extra-uninstall.sh
fi
|
sophia-collaborations/chobakwrap
|
pub-res/uninstall.sh
|
Shell
|
gpl-2.0
| 4,191 |
#!/usr/bin/env bash
# Common globals and helpers for Percona Server for MongoDB package tests.
DATADIR=""
BACKUPDIR="/tmp/backup"
CONFIGFILE="/etc/mongod.conf"
BACKUP_CONFIGFILE="/tmp/mongod.conf.backup"
LOG="/tmp/psmdb_run.log"
# SLES is 1 when /etc/os-release identifies a SUSE Linux Enterprise host.
SLES=0
if [ -f /etc/os-release ]; then
SLES=$(cat /etc/os-release | grep -c '^NAME=\"SLES' || true)
fi
# RHEL-family and SLES package mongod with /var/lib/mongo; Debian-family
# uses /var/lib/mongodb.
if [ -f /etc/redhat-release -o ${SLES} -eq 1 ]; then
DATADIR="/var/lib/mongo"
else
DATADIR="/var/lib/mongodb"
fi
function start_service {
  # Start mongod via the init mechanism appropriate for this distro,
  # then wait for it to come up.  Reads the SLES global.
  local redhatrelease=""
  if [ -f /etc/redhat-release ]; then
    redhatrelease=$(grep -o '[0-9]' /etc/redhat-release | head -n 1)
  fi
  # Split declaration from assignment so lsb_release's status isn't masked.
  local lsbrelease
  lsbrelease=$(lsb_release -sc 2>/dev/null || echo "")
  if [ "${lsbrelease}" = "trusty" ]; then
    echo "starting mongod service directly with init script..."
    /etc/init.d/mongod start
  elif [ "${redhatrelease}" = "5" ]; then
    echo "starting mongod service directly with init script..."
    /etc/init.d/mongod start
  elif [ "${lsbrelease}" != "" ] && [ "${SLES}" -eq 1 ]; then
    echo "starting mongod with /sbin/service on SLES..."
    /sbin/service mongod start
  else
    echo "starting mongod service... "
    service mongod start
  fi
  echo "waiting 10s for service to boot up"
  sleep 10
}
function stop_service {
  # Stop mongod via the init mechanism appropriate for this distro,
  # then wait for it to shut down.  Reads the SLES global.
  local redhatrelease=""
  if [ -f /etc/redhat-release ]; then
    redhatrelease=$(grep -o '[0-9]' /etc/redhat-release | head -n 1)
  fi
  # Split declaration from assignment so lsb_release's status isn't masked.
  local lsbrelease
  lsbrelease=$(lsb_release -sc 2>/dev/null || echo "")
  if [ "${lsbrelease}" = "trusty" ]; then
    echo "stopping mongod service directly with init script..."
    /etc/init.d/mongod stop
  elif [ "${redhatrelease}" = "5" ]; then
    echo "stopping mongod service directly with init script..."
    /etc/init.d/mongod stop
  elif [ "${lsbrelease}" != "" ] && [ "${SLES}" -eq 1 ]; then
    echo "stopping mongod with /sbin/service on SLES..."
    /sbin/service mongod stop
  else
    echo "stopping mongod service... "
    service mongod stop
  fi
  echo "waiting 10s for service to stop"
  sleep 10
}
function list_data {
  # Append a long listing of the mongod data directory to the run log.
  echo -e "listing files in datadir...\n"
  ls -alh "${DATADIR}/" >> "${LOG}"
}
function clean_datadir {
  # Delete everything inside the mongod data directory.
  echo -e "removing the data files...\n"
  # ${DATADIR:?} aborts if DATADIR is unset or empty, instead of
  # letting the command expand to "rm -rf /*".
  rm -rf "${DATADIR:?}"/*
}
function test_hotbackup {
# Exercise the createBackup command: hash the test db, take a hot backup,
# wipe the datadir, restore from the backup, and verify the hash matches.
# Exits 1 on any failure.
MD5_BEFORE=$(mongo localhost:27017/test --quiet --eval "db.runCommand({ dbHash: 1 }).md5" | tail -n1)
rm -rf ${BACKUPDIR}
mkdir -p ${BACKUPDIR}
chown mongod:mongod -R ${BACKUPDIR}
# BACKUP_RET is the count of '"ok" : 1' lines — 0 means the backup failed.
BACKUP_RET=$(mongo localhost:27017/admin --eval "db.runCommand({createBackup: 1, backupDir: '${BACKUPDIR}'})"|grep -c '"ok" : 1')
if [ ${BACKUP_RET} = 0 ]; then
# NOTE(review): ${engine} is not set in this function — presumably a
# global set by the calling test script; confirm.
echo "Backup failed for storage engine: ${engine}" | tee -a ${LOG}
exit 1
fi
# Restore: replace the datadir contents with the backup and restart.
stop_service
clean_datadir
cp -r ${BACKUPDIR}/* ${DATADIR}/
chown -R mongod:mongod ${DATADIR}
start_service
MD5_AFTER=$(mongo localhost:27017/test --quiet --eval "db.runCommand({ dbHash: 1 }).md5" | tail -n1)
if [ "${MD5_BEFORE}" != "${MD5_AFTER}" ]; then
echo "ERROR: dbHash before and after hotbackup are not the same!" | tee -a ${LOG}
exit 1
else
echo "dbHash is the same before and after hotbackup: ${MD5_BEFORE}:${MD5_AFTER}" | tee -a ${LOG}
fi
rm -rf ${BACKUPDIR}
}
function check_rocksdb {
  # Sanity-check the MongoRocks storage engine from RocksDB's LOG file:
  # library version, compression library support, and FastCRC.
  # Uses globals: DATADIR, VERSION, PSMDB{30,32,34,36}_ROCKSDB_VER.
  ROCKSDB_LOG_FILE="${DATADIR}/db/LOG"
  # Check RocksDB library version
  ROCKSDB_VERSION=$(grep "RocksDB version" "${ROCKSDB_LOG_FILE}" | tail -n1 | grep -Eo "[0-9]+\.[0-9]+(\.[0-9]+)*$")
  if [ "${VERSION}" == "3.0" ]; then
    ROCKSDB_VERSION_NEEDED=${PSMDB30_ROCKSDB_VER}
  elif [ "${VERSION}" == "3.2" ]; then
    ROCKSDB_VERSION_NEEDED=${PSMDB32_ROCKSDB_VER}
  elif [ "${VERSION}" == "3.4" ]; then
    ROCKSDB_VERSION_NEEDED=${PSMDB34_ROCKSDB_VER}
  elif [ "${VERSION}" == "3.6" ]; then
    ROCKSDB_VERSION_NEEDED=${PSMDB36_ROCKSDB_VER}
  else
    echo "Wrong parameter to script: $1"
    exit 1
  fi
  if [ "${ROCKSDB_VERSION}" != "${ROCKSDB_VERSION_NEEDED}" ]; then
    echo "Wrong version of RocksDB library! Needed: ${ROCKSDB_VERSION_NEEDED} got: ${ROCKSDB_VERSION}"
    exit 1
  fi
  # Check RocksDB supported compression libraries
  # (grep -c counts matching lines; same value as grep|wc -l).
  COMP_LIB_SNAPPY=$(grep -c "Snappy supported: 1" "${ROCKSDB_LOG_FILE}")
  COMP_LIB_ZLIB=$(grep -c "Zlib supported: 1" "${ROCKSDB_LOG_FILE}")
  COMP_LIB_BZIP=$(grep -c "Bzip supported: 1" "${ROCKSDB_LOG_FILE}")
  COMP_LIB_LZ4=$(grep -c "LZ4 supported: 1" "${ROCKSDB_LOG_FILE}")
  if [ "${COMP_LIB_SNAPPY}" -lt 1 ] || [ "${COMP_LIB_ZLIB}" -lt 1 ] || [ "${COMP_LIB_BZIP}" -lt 1 ] || [ "${COMP_LIB_LZ4}" -lt 1 ]; then
    echo "Error when checking compression libraries in RocksDB."
    echo "Snappy: ${COMP_LIB_SNAPPY}"
    echo "Zlib: ${COMP_LIB_ZLIB}"
    echo "Bzip: ${COMP_LIB_BZIP}"
    echo "LZ4: ${COMP_LIB_LZ4}"
    exit 1
  fi
  # Check RocksDB support for FastCRC
  FAST_CRC=$(grep -c "Fast CRC32 supported: 1" "${ROCKSDB_LOG_FILE}")
  if [ "${FAST_CRC}" -lt 1 ]; then
    echo "FastCRC is not enabled for MongoRocks."
    grep "Fast CRC32 supported" "${ROCKSDB_LOG_FILE}"
    exit 1
  fi
}
|
Percona-QA/package-testing
|
scripts/psmdb_common.sh
|
Shell
|
gpl-2.0
| 4,891 |
#!/bin/sh
# ------------------------------------------------------------------------------
# --- Run sampe to generate SAM file
# ------------------------------------------------------------------------------
# Uses environment variables: BWA, IND_ID, READ1, READ2.
# Check that genome FASTA and genome code were passed as parameters
USAGE="$0 genome.fasta genome_code";
if [ -z "$2" ]; then
    echo "ERROR: $USAGE";
    exit 1;
fi
# Strip ending from $1 (fasta).  The dot must be escaped: the original
# pattern '.[^.]*$' treated '.' as "any character", so a file name with
# no extension was deleted entirely instead of left unchanged.
GENOME_PATH=$(echo "$1" | sed 's/\.[^.]*$//')
GENOME_CODE=$2
$BWA/bwa sampe -A \
    $GENOME_PATH \
    results/${IND_ID}.read1.bwa.${GENOME_CODE}.sai \
    results/${IND_ID}.read2.bwa.${GENOME_CODE}.sai \
    $READ1 \
    $READ2 > results/${IND_ID}.PE.bwa.${GENOME_CODE}.sam
echo results/${IND_ID}.PE.bwa.${GENOME_CODE}.sam
exit;
|
bergeycm/NGS-map
|
scripts/sampe.sh
|
Shell
|
gpl-2.0
| 721 |
#!/bin/bash
# Run the win32 cross-build inside the prebuilt Docker image, with the
# current source tree mounted at /build; -e/-x make the inner script
# fail fast and trace its commands.
docker run -v "$(pwd):/build" wavbreaker-win32-build \
sh -e -x scripts/win32/run-build-inside.sh
|
thp/wavbreaker
|
scripts/win32/run-build.sh
|
Shell
|
gpl-2.0
| 114 |
#! /bin/sh
# Synthesize each listed design with ghdl, re-analyze the generated
# VHDL, and clean up.  synth/analyze/clean come from testenv.sh.
. ../../testenv.sh
for t in physical_division; do
synth $t.vhdl -e $t > syn_$t.vhdl
analyze syn_$t.vhdl
clean
done
echo "Test successful"
|
tgingold/ghdl
|
testsuite/synth/physical01/testsuite.sh
|
Shell
|
gpl-2.0
| 164 |
#!/bin/bash
# Build the NSIS installers for woo (libs, main package, and optional
# wooExtra modules), on either Linux or Windows/MSYS, and optionally
# upload the extras.  Flags: -l libs, -w woo, -e extras, -u upload.
#
# this script must be run from the woo/nsis directory
#
set -e -x
# must be absolute path where pyinstaller's-generated executable is
# /boot exists only on the Linux host, so its presence selects the
# platform throughout this script.
if [ -d /boot ]; then
# linux
BINDIR=/media/eudoxos/WIN7/src/woo/dist/wwoo-win64
else
# windows
BINDIR=/c/src/woo/dist/wwoo-win64
fi
DO_LIBS=false
DO_WOO=false
DO_EXTRA=false
DO_UPLOAD=false
while getopts ":lweu" opt; do
case $opt in
l) DO_LIBS=true ;;
w) DO_WOO=true ;;
e) DO_EXTRA=true ;;
u) DO_UPLOAD=true ;;
esac
done
# Version suffix: commit count plus abbreviated hash of HEAD.
REVNO=`git rev-list HEAD --count 2>/dev/null`-git-`git log -1 --format='%h' | head -n1`
echo $REVNO
DESTDIR=$BINDIR/..
cp *.nsh $BINDIR/
cp *.rtf $BINDIR/
rm -f $BINDIR/*-installer.exe
if $DO_EXTRA; then
# copy eggs to $BINDIR; they are not installed with other installers, so it is safe to have them there
rm -f $BINDIR/wooExtra*.egg
for d in ../wooExtra/*; do
echo $d
[ -f $d/setup.py ] || continue
pushd $d
rm -rf dist/*.egg
python setup.py bdist_egg
cp dist/*.egg $BINDIR
popd
done
fi
echo 'HERE'
if [ -d /boot ]; then
# linux
pushd $BINDIR
if $DO_LIBS; then makensis -DVERSION=1.0e nsis-wwoo-libs.nsh; fi
if $DO_WOO; then makensis -DVERSION=1.0-r$REVNO nsis-wwoo-main.nsh; fi
# make installers for extra modules
if $DO_EXTRA; then
# Egg names look like COMPONENT-VERSION-...; split on '-'.
for EGG in wooExtra.*.egg; do
COMPONENT=`echo $EGG | cut -f1 -d-`
VERSION=`echo $EGG | cut -f2 -d-`
makensis -DCOMPONENT=$COMPONENT -DVERSION=$VERSION nsis-wwoo-extra.nsh
if $DO_UPLOAD; then
# Each component module exports a KEY naming its private upload dir.
KEY=`python -c "import $COMPONENT; print $COMPONENT.KEY" || true`
scp $EGG Woo-$COMPONENT-*-installer.exe bbeta:host/woodem/private/$KEY/inst/
ssh bbeta chmod ug+r host/woodem/private/$KEY/inst/*.{exe,egg}
fi
done
fi
popd
else
# windows is so singularly ugly
MAKENSIS='/c/Program Files (x86)/NSIS/makensis.exe'
pushd $BINDIR
# work around msys expanding /D: http://forums.winamp.com/showthread.php?t=253732
# omg
# Instead of -D flags (mangled by msys), write the defines into
# defines.nsh and pass it as an extra script.
if $DO_LIBS; then
echo "!define VERSION 1.0e" > defines.nsh
"$MAKENSIS" defines.nsh nsis-wwoo-libs.nsh
fi
if $DO_WOO; then
echo "!define VERSION 0.99-r$REVNO" > defines.nsh
"$MAKENSIS" defines.nsh nsis-wwoo-main.nsh
fi
if $DO_EXTRA; then
for EGG in wooExtra.*.egg; do
COMPONENT=`echo $EGG | cut -f1 -d-`
VERSION=`echo $EGG | cut -f2 -d-`
echo "!define COMPONENT $COMPONENT" > defines.nsh
echo "!define VERSION $VERSION" >>defines.nsh
"$MAKENSIS" defines.nsh nsis-wwoo-extra.nsh
done
fi
popd
fi
mv $BINDIR/*-installer.exe $DESTDIR
|
gladk/woodem
|
nsis/nsis-runall.sh
|
Shell
|
gpl-2.0
| 2,498 |
#!/bin/bash
# File: newAVRproject.sh
# Date: 14.10.2019 12:52
# Author: Marek Nožka, marek <@t> tlapicka <d.t> net
# Licence: GNU/GPL
# Task: start a new AVR project by cloning the AVRtools template into
# the directory given as $1 (defaults to "AVRtools", which is also
# what git would use when no target directory is given).
############################################################
git clone https://github.com/spseol/AVRtools.git "${1:-AVRtools}"
|
spseol/udelatka
|
bin/newAVRproject.sh
|
Shell
|
gpl-2.0
| 268 |
#!/bin/bash
# Count occurrences of common swear words in the file given as $1.
# List compiled from:
# Slate: http://goo.gl/vPC4X4
# Wikipedia: http://goo.gl/mdcyT8
# NOTE(review): 'bitch', 'crap' and 'dick' appear twice in this list and
# are therefore reported twice — kept as-is to preserve the output.
swear_words=(
'shit' 'fuck' 'damn' 'bitch' 'crap' 'dick' 'bitch' 'crap'
'dick' 'piss' 'pussy' 'fag' 'asshole' 'cock' 'bastard' 'darn'
'douche' 'slut' 'whore' 'christ' 'jesus' 'arse' 'bloody' 'bollocks'
)
if [[ -f "$1" ]]; then
echo -e "\t\nCount for swear words in $1:\n\t"
printf "%-10s %s\n" "Word:" "Count:"
echo "------------------"
for w in "${swear_words[@]}"; do
# grep -E replaces deprecated egrep; -o prints one match per line,
# so counting lines counts matches.
n=$(grep -E -o -i -- "$w" "$1" | wc -l)
printf "%-10s %s\n" "$w" "$n"
done
else
echo "Error: $1 is not a valid file"
exit 1
fi
exit 0
|
bhalash/Fun
|
Shell/swearwords.sh
|
Shell
|
gpl-2.0
| 650 |
#!/bin/sh
# Run "tag -a" for every line of the file given as $1, tracing each
# command (-x) and aborting on errors (-e).
set -xve
while read -r line; do
# NOTE(review): $line is left unquoted, presumably so that a line such
# as "TAGNAME file" word-splits into separate arguments for tag — confirm.
tag -a $line
done < "$1"
|
iamthad/redditarchiver
|
mktags.sh
|
Shell
|
gpl-3.0
| 69 |
#!/bin/bash
# File: ss-local.sh
# Description: installs the ShadowSocks client on a CentOS 7 system.
# Version: 1.0
# Created: 2016-11-04
# Revision: None
# Author: Selphia (sp), [email protected]
### Check that a C compiler (GCC) is installed
gcc --version
if [ $? != 0 ]
then
echo 'Before you proceed, make sure that your system has a C compiler'
exit 0
fi
# Show the Python 3 version
echo "Python3 Version:"
python3 -V
if [ $? != '0' ]
then
echo 'Before you proceed, make sure that your system has a python3'
exit 0
fi
### Set variables (LATEST globs the unpacked libsodium directory)
LATEST='libsodium-*'
echo 'Server IP address'
read server_ip
echo 'Port number to be used'
read server_port
echo 'ShadowSocks password'
read password
echo 'In the end, you should choose to use the form of encryption that meets your security requirements. a/c'
echo '(a) aes-256-cfb'
echo '(c) chacha20'
read method_
method='Null'
# Re-prompt until the user picks a valid cipher (a or c).
while [ $method == 'Null' ]
do
if [ $method_ == 'a' ]
then
echo 'Encryption method : aes-256-cfb'
method=aes-256-cfb
elif [ $method_ == 'c' ]
then
echo 'Encryption method : chacha20'
method=chacha20
else
echo -e "\033[1;31mEnter the error, please enter the correct encryption method\033[0m a/c"
read method_
fi
done
# Install ShadowSocks
pip3 install shadowsocks
# Write the config file /etc/shadowsocks/config.json
mkdir /etc/shadowsocks
echo "{
\"server\":\"$server_ip\",
\"server_port\":$server_port,
\"local_address\": \"127.0.0.1\",
\"local_port\":1080,
\"password\":\"$password\",
\"timeout\":300,
\"method\":\"$method\",
\"fast_open\": false
}" > /etc/shadowsocks/config.json
# Build and install libsodium (required for the chacha20 cipher)
wget https://download.libsodium.org/libsodium/releases/LATEST.tar.gz
tar -xvf LATEST.tar.gz
cd ./$LATEST
./configure
make all
make install
echo /usr/local/lib > /etc/ld.so.conf.d/usr_local_lib.conf
ldconfig
cd ..
rm -rf LATEST.tar.gz
rm -rf $LATEST
# Install m2crypto (required for the aes-256-cfb cipher)
yum install m2crypto -y
# Symlink the ShadowSocks binaries into /usr/bin
ln -sf /usr/local/python3/bin/ssserver /usr/bin
ln -sf /usr/local/python3/bin/sslocal /usr/bin
# Start the client at boot
echo "sslocal -c /etc/shadowsocks/config.json -d start" >> /etc/rc.d/rc.local
chmod a+x /etc/rc.d/rc.local
# Start the client now; installation finished
sslocal -c /etc/shadowsocks/config.json -d start
echo 'If the installation was successful,ShadowSocks should now be running on your system.'
|
selphia/ManagementScript
|
ss-local.sh
|
Shell
|
gpl-3.0
| 2,297 |
#!/bin/sh
# Walk the source tree and (re)apply the GPL copyright notice to every
# file, choosing a comment style per file type (Java/AspectJ block
# comment, XML comment, Markdown rule-delimited block, '#' hash lines).
# Any previous notice, located by its BEGIN/END markers, is stripped first.
copyrightNotice() cat << EOF
BEGIN COPYRIGHT NOTICE
This file is part of program "Podoscopista"
Copyright 2012 Rodrigo Lemos
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
END COPYRIGHT NOTICE
EOF
# the following files should not have copyright notice
# (generally because they are distributed under another license)
EXCLUSIONS=`cat << EOF
COPYING
applylicense.sh
EOF`
NOTICEMARKERSREGEXP="\(BEGIN\|END\) COPYRIGHT NOTICE"
# Pre-render the notice once per comment style into temp files that are
# removed when the script exits.
HASHNOTICE="`mktemp -t noticeXXXXX`"
JAVANOTICE="`mktemp -t noticeXXXXX`"
XMLNOTICE="`mktemp -t noticeXXXXX`"
MDNOTICE="`mktemp -t noticeXXXXX`"
trap "rm -fR $HASHNOTICE $XMLNOTICE $JAVANOTICE $MDNOTICE" exit
(
copyrightNotice | sed -e 's/^/# /'
) > "$HASHNOTICE"
(
echo "<!--"
copyrightNotice | sed -e 's/^/ /'
echo "-->"
) > "$XMLNOTICE"
(
echo ""
echo "--------------------------------------------------------------------------------"
copyrightNotice | sed -e 's/^/ /'
echo ""
echo "--------------------------------------------------------------------------------"
) > "$MDNOTICE"
(
head -c 80 < /dev/zero | tr '\0' '*' | sed -e 's/^*/\//' -e 's/$/\n/'
copyrightNotice | sed -e 's/^/ * /'
head -c 80 < /dev/zero | tr '\0' '*' | sed -e 's/^*/ /' -e 's/*$/\/\n/'
) > "$JAVANOTICE"
# Print the line numbers of the BEGIN and END markers of an existing
# notice in $1 (space-separated); fail if there is none.
findPreviousLicense() {
FILE="$1"
GREPOUTPUT=`grep "$NOTICEMARKERSREGEXP" -Zno "$FILE"` || return 1;
echo "$GREPOUTPUT" | sed -e "s/:$NOTICEMARKERSREGEXP//g" | tr "\n" " "
}
# Delete an existing notice from $1 in place; $2/$3 widen the deleted
# range (e.g. "-1"/"+1") to also remove the comment delimiters.
stripPreviousLicense() {
FILE="$1"
INCRBEGIN="$2"
INCREND="$3"
LINES="`findPreviousLicense "$FILE"`" || return
set -- $LINES; BEGIN="$1"; END="$2"
BEGIN="$(($BEGIN $INCRBEGIN))"
END="$(($END $INCREND))"
sed -e "${BEGIN},${END}d" -i "$FILE"
}
# Insert a throwaway first line so "1r" can place the notice at the very
# top (sed's r appends after the addressed line, which "1d" then removes).
stuffFirstLine() {
FILE="$1"
sed -i "$FILE" -f - << EOF
1i\
_
EOF
}
applyJava() {
FILE="$1"
stripPreviousLicense "$FILE" "-1" "+1"
stuffFirstLine "$FILE"
sed -i "$FILE" -e "1r $JAVANOTICE" -e "1d"
}
applyXML() {
FILE="$1"
stripPreviousLicense "$FILE" "-1" "+1"
# aaa aaa dd dd:dd:dd aaa dddd
# Keep an XML declaration (<?...) on line 1, placing the notice after it.
if (head -n 1 "$FILE" | grep -q "<?") then
sed -i "$FILE" -e "1r $XMLNOTICE"
else
stuffFirstLine "$FILE"
sed -i "$FILE" -e "1r $XMLNOTICE" -e "1d"
fi
}
applyMD() {
FILE="$1"
stripPreviousLicense "$FILE" "-2" "+2"
cat "$MDNOTICE" >> "$FILE"
}
applyHash() {
FILE="$1"
# NOTE(review): called without the begin/end increments used elsewhere,
# so the arithmetic expands with empty offsets — confirm intentional.
stripPreviousLicense "$FILE"
# aaa aaa dd dd:dd:dd aaa dddd
# Keep a timestamp comment or a shebang on line 1, inserting after it.
if (head -n 1 "$FILE" | grep -q "^#... ... .. ..:..:.. ..S\?. ....$") then
sed -i "$FILE" -e "1r $HASHNOTICE"
elif (head -n 1 "$FILE" | grep -q "^#!") then
sed -i "$FILE" -e "1r $HASHNOTICE"
else
stuffFirstLine "$FILE"
sed -i "$FILE" -e "1r $HASHNOTICE" -e "1d"
fi
}
EXCLUSIONS="`echo "$EXCLUSIONS" | sed -e "s|^|./|"`"
# Visit every tracked file (skipping .git and target dirs) and dispatch
# on extension first, then on file(1) magic as a fallback.
find \( -path './.git' -o -path '*/target' \) -prune -o -type f -print | grep -vF "$EXCLUSIONS" |
while read FILE
do
case "`basename "$FILE"`" in
*.java | *.aj)
applyJava "$FILE"
;;
*.xml | *.xsd)
applyXML "$FILE"
;;
*.md)
applyMD "$FILE"
;;
*)
MAGIC="`file -b "$FILE"`"
case "$MAGIC" in
# NOTE(review): both alternatives of this pattern are identical —
# one was probably meant to be a different file(1) description.
"XML document text" | "XML document text")
applyXML "$FILE"
;;
"ASCII text" | "POSIX shell script text executable")
applyHash "$FILE"
;;
*)
# don't know
echo "$FILE: $MAGIC"
;;
esac
;;
esac
done
|
rslemos/podoscopista
|
applylicense.sh
|
Shell
|
gpl-3.0
| 3,797 |
#!/bin/bash
# Attach an interactive geth console to the locally built node.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
pushd "$DIR"
# Run the binary directly; the original "cmd='…'; eval $cmd" indirection
# added nothing and eval'ing command strings is an antipattern.
./../build/bin/geth attach
popd
|
crazyquark/go-ethereum
|
run/attachGeth.sh
|
Shell
|
gpl-3.0
| 129 |
#!/bin/bash
# Set up an OpenShift A/B-testing demo: two PHP builds (7.2 and 7.0) of
# the same app behind one route with a 10/90 static round-robin split.
echo "$(date): Setting up demo environment."
echo "$(date): Creating project: demo-abtesting."
oc new-project demo-abtesting --description "Demo of AB testing using PHP 7.2 and 7.0" --display-name "Demo - AB testing"
oc project demo-abtesting
echo "$(date): Creating an PHP 7.2 application."
oc new-app openshift/php:7.2~https://github.com/mglantz/ocp-php.git --name=php72
oc expose service php72
# Fixed: this message used to say "PHP 5.6" although the app built here
# is PHP 7.0.
echo "$(date): Creating an PHP 7.0 application"
oc new-app openshift/php:7.0~https://github.com/mglantz/ocp-php.git --name=php70
oc expose service php70
echo "$(date): Creating an AB route"
oc expose service php72 --name='ab-php' -l name='ab-php'
echo "$(date): Configuring load balancing between the two applications."
oc set route-backends ab-php php72=10 php70=90
oc annotate route/ab-php haproxy.router.openshift.io/balance=static-rr
echo "$(date): Waiting for the php applications to build and deploy. This may take a bit: "
# Poll until no build is running and both pods report Running 1/1.
while true; do
if oc get builds|egrep '(php70|php72)'|grep Running|wc -l|grep 0 >/dev/null; then
if oc get pods|egrep '(php70|php72)'|grep Running|grep "1/1"|wc -l|grep 2 >/dev/null; then
echo "$(date): Applications will now be up in a couple of seconds."
break
fi
fi
sleep 1
done
|
mglantz/openshift-demos
|
source/abtesting/install.sh
|
Shell
|
gpl-3.0
| 1,241 |
#!/bin/bash
# PHP5 — install PHP 5.6 and 7.0 (plus shared extensions) from the
# ondrej/php PPA for the mail-server setup, defaulting the php
# alternative to 5.6.
##########################
source setup/functions.sh # load our functions
source /etc/xenmailserver.conf # load global vars
# ### Removing PHP7.0
#echo "Removing all PHP packages..."
#apt-get remove php* > /dev/null 2>&1
#apt-get purge php* > /dev/null 2>&1
#apt-get autoremove > /dev/null 2>&1
#apt-get autoclean > /dev/null 2>&1
# Drop any previously configured ondrej source lists before re-adding.
rm -rf /etc/apt/sources.list.d/ondrej-*
apt_install python-software-properties
echo "Adding PHP5/5.6/7.0 Repo of Ondrej..."
# ### Add Repository
add-apt-repository -y ppa:ondrej/php > /dev/null 2>&1
hide_output apt-get update
# ### Install needed PHP packages for the script
echo "Installing needed PHP packages..."
echo "5.6..."
apt_install php5.6 php5.6-dev php5.6-soap php5.6-imap php5.6-xsl php5.6-cli php5.6-sqlite php5.6-gd php5.6-curl php5.6-fpm php5.6-memcached \
php5.6-mcrypt php5.6-intl php5.6-json php5.6-common php5.6-apcu php5.6-zip php5.6-mbstring php5.6-pspell
apt_install curl dbconfig-common memcached unzip libapr1 libtool libcurl4-openssl-dev tinymce libjs-jquery libjs-jquery-mousewheel libmagic1
echo "7.0..."
apt_install php7.0 php7.0-dev php7.0-soap php7.0-imap php7.0-xsl php7.0-cli php7.0-sqlite php7.0-gd php7.0-curl php7.0-fpm php7.0-memcached \
php7.0-mcrypt php7.0-intl php7.0-json php7.0-common php7.0-apcu php7.0-zip php7.0-mbstring php7.0-pspell \
php-pear php-xml-parser php-auth php-net-smtp php-net-socket php-net-sieve php-mail-mime php-crypt-gpg
phpenmod mcrypt
phpenmod imap
echo "linking php5.6"
# Make /etc/alternatives/php point at the 5.6 binary.
ln -sfn /usr/bin/php5.6 /etc/alternatives/php
|
realizelol/xenmailserv
|
setup/php.sh
|
Shell
|
gpl-3.0
| 1,544 |
#!/bin/sh
# Prompt for a YouTube URL and a format id, download the video with
# youtube-dl, play it with mplayer, then remove the temporary file.
echo enter youtube URL
read -r URL
echo enter format
read -r FORMAT
# Quote the user-supplied values so URLs/format ids with special
# characters survive; the original subshell around youtube-dl served
# no purpose and was dropped.
youtube-dl -f "$FORMAT" --no-warnings -o "/root/my-applications/playYT/playYT.$FORMAT" "$URL"
mplayer "/root/my-applications/playYT/playYT.$FORMAT"
rm "/root/my-applications/playYT/playYT.$FORMAT"
|
ThomasTheSpaceFox/GTKtools
|
BASHTEST1/bashtest2.sh
|
Shell
|
gpl-3.0
| 262 |
#!/bin/sh
# Download the example dataset and unpack it in the current directory.
url=http://www.public.asu.edu/~lwolf2/data/example_data.tar.gz
curl "$url" > example_data.tar.gz && tar -zxvf example_data.tar.gz
|
ljwolf/pyMaxFlow
|
getdata.sh
|
Shell
|
gpl-3.0
| 129 |
#! /usr/bin/bash
# Script: csv.bash
# Author: Ben Altman (csv.feedback.benalt at xoxy.net)
# Description: Convert csv files from Excel in to a format using a different separator.
#
# Usage: csv.bash [options] csv_file
# -i csv_file_field_separator (default is comma)
# -o output_field_separator (default is pipe)
# -l -m -n multiline_field_separator (default is "\n")
# Print usage (optionally preceded by an error message) and exit 1.
usage() {
[[ -n $1 ]] && echo "$1\n" >&2
cat <<-xx >&2
Usage: ${0##*/} [options] [csv_file(s)]
-i csv_file_field_separator (default is comma)
-l, -m, -n multiline_field_separator (default is "\n")
-o output_field_separator (default is pipe)
xx
exit 1
}
ifs=, ofs=\| nl='\\n' # defaults
while getopts :i:o:m:n:l: opt; do
case $opt in
i) ifs=$OPTARG;;
o) ofs=$OPTARG;;
[mnl]) nl=$OPTARG;;
*) usage;;
esac
done
shift $((OPTIND-1))
# The awk program below re-assembles quoted CSV fields that contain the
# field separator, embedded double quotes, or literal newlines, then
# prints the record with the requested output separator.  It is written
# for portable awk (note the gensub/gawk alternative left commented out).
awk -vfs="$ifs" -vofs="$ofs" -vnl="$nl" '
# does the field end in an even number of double quotes?
function evenqq(field) {
# return ! ((length(field) - length(gensub("\"*$","",1,field))) % 2) # gawk only
f2=field; gsub("\"*$","",f2)
return ! ((length(field) - length(f2)) % 2)
}
BEGIN {
if (! nl) nl = "\\n"
if (! fs) FS = ","; else FS = fs
if (! ofs) OFS = "|"; else OFS=ofs
# above line backs up FS for when rejoining fields containing FS in the case when it`s a pipe. Don`t join with [|] per the below
gsub("[|]","[|]",FS) # Input separator containing a pipe replace "|" with "[|]"
}
{
# i points to the field we are joining to. j points to the field that may need to join to i.
# n: number of fields in the current line being processed.
# nf: tally of the number of fields including lines read for multi-line fields that have
# more fields on those lines to complete the record.
i=1; j=2
n = nf = split($0, field)
# Exceptional even double quoted field where then entire field is "" is equal to blank without the double quotes
if ($i == "\"\"") $i=""
while (j <= n+1) {
# 1. A field with no DQs is a simple field and we can move straight to the next field.
# 2. A field starting with a DQ can contain DQs, FS or multiple lines which we need to join.
# If the field is not a simple field with no DQs at all
if (substr($i,1,1) == "\"") {
# while the field ENDS in even or no DQs it is joined to the next field, until odd quotes are joined ending the field...
while (evenqq($i))
{
if (j <= n) { # join with this line (fs)
$i = $i fs field[j++]
nf--
} else { # join with next line (nl)
getline line
n = split(line, field)
nf += n - 1
$i = $i nl field[1]
j = 2
}
}
# Remove surrounding DQs and remove escape DQ character from DQs.
gsub("^\"|\"$","",$i); gsub("\"\"","\"",$i)
}
# Next field[i] is overwritten by current field[j] and move j pointer to next field that might need to be joined
$(++i) = field[j++]
}
NF = nf
print
}' "$@"
|
benalt613/csv
|
csv.bash
|
Shell
|
gpl-3.0
| 2,927 |
#!/bin/bash
# Build linux release tarballs of this Go project, one per target
# architecture, and place them under $DIR/release.
set -e

echo "Installing build dependencies..."
sudo apt-get install -y libc6-dev-i386 >/dev/null

ARCHS[0]=386
ARCHS[1]=amd64
#ARCHS[2]=arm
OS=linux

#VERSION=$(git describe --tags)
VERSION=$TRAVIS_TAG

# Project root = parent of the directory containing this script.
# All path expansions are quoted so paths containing spaces cannot break
# the build (the original left them unquoted).
DIR=$(dirname "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)")
NAME=$(basename "$DIR")
TMP_DIR="$DIR/tmp"

mkdir -p "$DIR/release"

for ARCH in "${ARCHS[@]}"; do
	echo "Building $ARCH release..."
	cd "$DIR"
	if [[ $ARCH == "386" ]]; then
		# CGO_ENABLED=1 required for sqlite
		env CGO_ENABLED=1 GOOS=$OS GOARCH=$ARCH go build
	else
		env GOOS=$OS GOARCH=$ARCH go build
	fi
	# Stage the binary plus config/assets into a versioned directory, then
	# pack it into the release tarball.
	CURRENT=$NAME-$OS-$ARCH-$VERSION
	CURRENT_DIR=$TMP_DIR/$CURRENT
	mkdir -p "$CURRENT_DIR"
	cd "$CURRENT_DIR"
	mv "$DIR/$NAME" ./
	cp "$DIR/config.example.yml" ./
	cp -r "$DIR/assets" ./
	cp -r "$DIR/install" ./
	tar -zcvf "$DIR/release/$CURRENT.tar.gz" *
done
|
abeMedia/ira
|
scripts/release.sh
|
Shell
|
gpl-3.0
| 889 |
#!/bin/bash
# Run the paralog-pair simulation analysis (MG94 model, non-clock, simulation
# replicate 97) and capture its console output to a per-run log file.
python AnalyzeSimulation.py --paralog1 YMR143W --paralog2 YDL083C --simnum 97 > YMR143W_YDL083C_MG94_nonclock_Sim97_PrintScreen.txt
|
xjw1001001/IGCexpansion
|
Simulation/ShFiles/MG94_YMR143W_YDL083C_sim97.sh
|
Shell
|
gpl-3.0
| 145 |
#!/bin/bash
# Regenerate the doxygen HTML docs and publish them onto the gh-pages branch.
# NOTE: shebang changed from /bin/sh to /bin/bash because the script relies on
# the bash-only BASH_SOURCE array below.
set -e  # Any commands which fail will cause the script to exit
set -x  # Verbose script

# find out srcs path:
SCRIPTS_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
BRANCH_NAME="master"

# Regenerate doxygen docs:
cd "${SCRIPTS_DIR}/../libldaq/"
doxygen
cd "${SCRIPTS_DIR}/../"
git checkout gh-pages
set +e
# Replace the previously published tree for this branch with the fresh HTML.
rm -fr "${SCRIPTS_DIR}/../${BRANCH_NAME}"
mv "${SCRIPTS_DIR}/../libldaq/html" "${SCRIPTS_DIR}/../${BRANCH_NAME}"
git add "${SCRIPTS_DIR}/../${BRANCH_NAME}"
git commit -a
git checkout "${BRANCH_NAME}"
|
LibreDAQ/libredaq
|
scripts/rebuild_doxygen_docs.sh
|
Shell
|
gpl-3.0
| 526 |
#!/bin/bash
# USAGE: ./whole_intron_fasta.sh upstream_introns.bed downstream_introns.bed genome.fa
#wget -c http://hgdownload.cse.ucsc.edu/goldenPath/ce11/bigZips/chromFa.tar.gz -O - | gzip -dc | tar -xO > ce11.fa

# Extract strand-aware, name-labelled FASTA sequences for the upstream and
# downstream intron intervals from the supplied genome.
upstream=$1
downstream=$2
genome=$3

# Quote all file arguments so paths with spaces or glob characters are safe.
bedtools getfasta -fi "$genome" -bed "$upstream" -s -name > left_introns.fa
bedtools getfasta -fi "$genome" -bed "$downstream" -s -name > right_introns.fa
|
alexandruioanvoda/autoBLAST
|
whole_intron_fasta.sh
|
Shell
|
gpl-3.0
| 399 |
#!/bin/bash
# shellcheck disable=SC2034
#--
# M A I N
#--

# Entry point: parse the command line, register the app, locate the phplib
# helper directory, then dispatch to the requested sub-command.
_parse_arg "$@"

APP_DESC='Administration script'

_rks_app "$0" "$@"

# Resolve PATH_PHPLIB from whichever candidate checkout contains bin/toggle.
if [[ -s ../phplib/bin/toggle ]]; then
	PATH_PHPLIB=$(realpath ../phplib)
elif [[ -s ../../bin/toggle ]]; then
	PATH_PHPLIB=$(realpath ../..)
fi

# Dispatch on the first parsed argument; unknown commands print the syntax.
case ${ARG[1]} in
	build) build;;
	docs) docs;;
	docker_osx) docker_osx;;
	php) do_php "${ARG[2]}";;
	test) php test/run.php;;
	ubuntu) ubuntu;;
	*) _syntax "build|docs|docker_osx|php|test|ubuntu"
esac
|
RolandKujundzic/rkphplib
|
sh/run/main.sh
|
Shell
|
gpl-3.0
| 490 |
#!/bin/bash
# Restart lxpanel after clearing its cached menus so menu changes show up.
# The original `set -i` was removed: -i is not a valid option for the `set`
# builtin and cannot turn a running script interactive.
set -e

# killall returns non-zero when no lxpanel is running; tolerate that so the
# panel can still be started below instead of aborting under `set -e`.
killall lxpanel || true
find ~/.cache/menus -name '*' -type f -delete
nohup lxpanel -p LXDE > /dev/null &
|
bboortz/archlinux-scripts
|
lxde_tools/restart_lxpanel.sh
|
Shell
|
gpl-3.0
| 126 |
#!/bin/sh
# P01 - Lab-session finalization script
# Performs the operations carried out during the lab session, leaving
# the machine in the state it must be in once the session is finished.
# After finishing lab P01 the machine must be in the same state as before doing it.

# Resolve the directory this script lives in (ORIGEN) while preserving the
# caller's working directory (ACTUAL).
ACTUAL=`pwd` && cd `dirname $0` && ORIGEN=`pwd` && cd ${ACTUAL}
DIRRAIZ=${ORIGEN}"/"
# Finalization simply re-runs the initialization script to restore the state.
$DIRRAIZ/P01inicio.sh
|
carrodher/teleco
|
STA/P01/P01/P01final.sh
|
Shell
|
gpl-3.0
| 389 |
#!/bin/bash
# ----------------------------------------------------------------------
#
# Author: Myrto C Kontouli
# 999Design.com
# Date: 18/May/2017
# Last Updated: 28/June/2017
#
# ----------------------------------------------------------------------
#
# Backup wrapper/handler
#
# Backs up target directory and its contents, as well as whole of target database, in an incremental manner.
# Settings for directories and databases are in this file.
# Uses hard links to save space.
# Makes use of tmp files which are deleted at end of execution.
# Emails your preferred email at the end of completion.
#
#
# ----------------------------------------------------------------------
# Options
#
# -v Verbose output
#
# ----------------------------------------------------------------------
set -e # Temporary error handling. (Quit script on error)
# Move to directory where the script is at.
# This is added to make it possible to summon the script from a different path and still have the sources etc work properly ( even when relative).
cd "$(dirname "${BASH_SOURCE[0]}")"
#Myrto: make it log the echoes to tmp.
# From here on, ALL stdout/stderr is captured into a private temp file; the
# finish/out2email handlers later append it to the log and mail it.
OUTPUT_TMP=$(mktemp -q tmp.XXXXXX)
chmod 0600 "$OUTPUT_TMP"
exec &> "$OUTPUT_TMP"
#Emailer
# Mail the captured run output to CONTACT_EMAIL, if one is configured.
# Reads the globals CONTACT_EMAIL and OUTPUT_TMP.
out2email() {
	if [ -n "$CONTACT_EMAIL" ]
	then
		# Feed the temp file straight to mail; the original `cat | mail`
		# pipeline spawned a needless extra process.
		mail -s "[$(hostname)] BACKUP RUN $(date +%A)" "$CONTACT_EMAIL" < "$OUTPUT_TMP"
	fi
}
#Backtrace generator
#Creds: https://stackoverflow.com/q/5811002
# Print "Backtrace is:" followed by one 'caller' line per stack frame above
# this function; 'caller' fails once past the top of the stack.
backtrace () {
	echo "Backtrace is:"
	for (( i=1; ; i++ )); do
		caller $i || break
	done
}
#Error reporting handling
#Creds: https://stackoverflow.com/a/185900
# Report a failure (including the message when one was supplied), dump a
# backtrace, stamp the log with an EXITED-WITH-ERROR banner, and exit.
# $1 - optional message, $2 - optional exit code (defaults to 1).
error() {
	local message=$1
	local code=${2:-1}
	if [[ -z "$message" ]] ; then
		echo "Error! Exiting with status ${code}"
	else
		echo "Error! ${message} Exiting with status ${code}"
	fi
	backtrace
	echo "----------BACKUP EXITED WITH ERROR on $(date +%Y-%m-%d_%T)-------------"
	echo ""
	exit "${code}"
}
# EXIT-trap handler: persist the captured output, notify, and clean up.
finish() {
	# Append this run's captured output to the persistent log file.
	LOG_FILE="bkup.log"
	if [ ! -f "$LOG_FILE" ]; then
		touch "$LOG_FILE"
	fi
	cat "$OUTPUT_TMP" >> "$LOG_FILE"
	# Mail the captured output, then drop every temporary work file.
	out2email
	rm -f tmp.*
}
set +e # End of: Temporary error handling.
# 'finish' always runs on exit; 'error' fires on any failing command while
# the ERR trap is active.
trap finish EXIT
trap error ERR
#!!!!!!
#------------- ERROR REPORTING ACTIVE HERE ON OUT! -------------
#!!!!!!
echo ""
echo "----------BACKUP STARTED on $(date +%Y-%m-%d_%T)-------------"
#Set up settings
source ./bkup_settings.sh
# Parse command-line flags; only -v (verbose) is supported.
while getopts ":v" opt
do
	case $opt in
		v)
			v=true
			echo "-v was triggered"
			;;
		\?)
			echo "Invalid option: -$OPTARG" >&2
			exit
			;;
	esac
done
# NOCODE_SWITCH / NODB_SWITCH presumably come from bkup_settings.sh — when a
# switch is unset, the corresponding backup stage runs (TODO confirm).
if [ -z "$NOCODE_SWITCH" ]
then
	#Backup files
	source ./bkup_code.sh
fi
if [ -z "$NODB_SWITCH" ]
then
	#Backup databases
	source ./bkup_databases.sh
fi
#Rotate backups
source ./bkup_rotation.sh
echo "----------BACKUP COMPLETED on $(date +%Y-%m-%d_%T)-------------"
echo ""
|
LimeWub/bkup
|
bkup.sh
|
Shell
|
gpl-3.0
| 2,836 |
#!/bin/bash
# modules/zfs/run.sh
# Copyright Vince Mulhollon 2014
# GPLv3 license
# Store two copies of every block on the root pool for extra redundancy.
zfs set copies=2 zroot
exit 0
|
vincemulhollon/nihconfig
|
modules/zfs/run.sh
|
Shell
|
gpl-3.0
| 115 |
#!/bin/bash
# Print the contents of every RTRV-RTG-INFO.tl1 file found under the current
# directory. Uses find -exec instead of iterating over word-split command
# substitution output, so paths containing whitespace are handled correctly.
find . -name "RTRV-RTG-INFO.tl1" -exec cat {} +
|
jmacauley/OpenDRAC
|
Mediation/Simulators/src/main/resources/Production/search.sh
|
Shell
|
gpl-3.0
| 80 |
#!/bin/bash
# Run the debug build under valgrind with full leak checking, origin
# tracking for uninitialised values, and the project's suppression file.
valgrind --leak-check=full --track-origins=yes --suppressions=./SI.supp ./build/SI-Debug
|
wyllman/SI
|
valgrind-debug.sh
|
Shell
|
gpl-3.0
| 102 |
#!/bin/bash
# Rebuild the mythtv/keycloak image and tag it with the upstream Keycloak
# version number taken from the official image's metadata.
docker rmi mythtv/keycloak
docker build --tag mythtv/keycloak /root/docker/keycloak
# Extract KEYCLOAK_VERSION from the upstream image's inspect output
# ($() replaces the deprecated backtick command substitution).
VERSION=$(docker inspect quay.io/keycloak/keycloak | grep KEYCLOAK_VERSION | cut -f2 -d= | sed -e 's/",//g' | head -1)
docker tag mythtv/keycloak:latest "mythtv/keycloak:${VERSION}"
|
MythTV/docker
|
rebuild-keycloak.sh
|
Shell
|
gpl-3.0
| 277 |
#!/bin/bash
# Convert the 'origin' remote of the current git repository from HTTPS form
# (https://host/owner/repo) to SSH form (git@host:owner/repo.git).
repo_url=$(git remote get-url origin)

# Use bash's regex match instead of the fragile `echo | grep` subshell; the
# expansion is quoted everywhere so URLs with unusual characters are safe.
if [[ "$repo_url" =~ https://.+ ]]
then
	git remote remove origin
	repo_url=$(echo "$repo_url" | sed -rn 's/https:\/\/([^/]+)\/(.+)/git@\1:\2.git/p')
	git remote add origin "$repo_url"
	git remote get-url origin
else
	echo "Repo is using ssh? '$repo_url'"
fi
|
user9209/LinuxScripts
|
git/switch_https_to_ssh.sh
|
Shell
|
gpl-3.0
| 531 |
#!/bin/bash
#$ -S /bin/bash
################################################################
# Penn Hippocampus Atlas: Intensity-Based Normalization Script #
################################################################
# Usage function
function usage()
{
cat <<-USAGETEXT
pmatas_ibn_long: penn hippo atlas longitudinal analysis script
usage:
pmatas_ibn_long [options]
required options:
-b image Filename of baseline grayscale image
-c image Filename of baseline reference space image
-f image Filename of followup grayscale image
-g image Filename of followup reference space image
-s image Filename of hippocampus segmentation in baseline
-t mesh Filename of tetrahedral mesh fitted to segmentation
-n string Naming prefix for all files generated
optional:
-w Working directory (default /tmp/pmatlas_XXXXXX)
-d Enable debugging
-i How many iterations of ANTS to run (default=60)
-a globalmat initial global transform in matrix format
-q dim type of deformable registration (def:3, else 2 or 2.5)
-m Use fixed image mask
-o Don't use origin equalization
-r superres Superresolution sampling for 2D registration, e.g. 600x600%
-e modality Modality -- MPRAGE or TSE
USAGETEXT
}
# Emit a 4x4 identity matrix in the row-per-line text form used for the
# initial global transform file.
function get_identity_transform()
{
  printf '%s\n' "1 0 0 0" "0 1 0 0" "0 0 1 0" "0 0 0 1"
}
# Emit an ITK transform file describing the 2-D identity affine transform
# (used as a placeholder when a registration stage produced no transform).
function get_2didentity_transform_itk()
{
  cat <<'ITKID'
#Insight Transform File V1.0
# Transform 0
Transform: MatrixOffsetTransformBase_double_2_2
Parameters: 1 0 0 1 0 0
FixedParameters: 0 0
ITKID
}
# Write the IRTK non-rigid registration parameter file into the working
# directory. Requires the globals WDIR and NATIVERES to be set by the caller
# (NATIVERES aborts via ${NATIVERES?} if unset).
function setup_irtk()
{
  # Set up the parameter file to more or less match ANTS
  cat > ${WDIR}/irtk/nreg.param <<-PARMFILE
# Non-rigid registration parameters
Lambda1 = 0
Lambda2 = 0
Lambda3 = 0
Control point spacing in X = 4.8
Control point spacing in Y = 4.8
Control point spacing in Z = 1.6
Subdivision = True
# Registration parameters
No. of resolution levels = 1
No. of bins = 64
Epsilon = 0.0001
Padding value = -32768
Similarity measure = NMI
Interpolation mode = Linear
Optimization method = GradientDescent
# Registration parameters for resolution level 1
Resolution level = 1
Target blurring (in mm) = 0.6
Target resolution (in mm) = ${NATIVERES}
Source blurring (in mm) = 0.6
Source resolution (in mm) = ${NATIVERES}
No. of iterations = 10
No. of steps = 4
Length of steps = 0.736945
	PARMFILE
}
# Read the options
# b/c: baseline grayscale & reference-space images; f/g: followup pair;
# s: baseline segmentation; t: tet mesh; n: output prefix; w: working dir;
# a: initial global transform matrix; q: registration dimensionality;
# i: ANTS iterations; r: 2-D superresolution sampling; e: modality;
# m: use fixed-image mask; d: enable shell tracing; o: skip origin matching.
while getopts "b:c:e:f:g:w:s:t:n:r:a:q:i:mdho" opt; do
  case $opt in
    b) TP0=$OPTARG;;
    c) DTP0=$OPTARG;;
    e) MODALITY=$OPTARG;;
    f) TP1=$OPTARG;;
    g) DTP1=$OPTARG;;
    t) TET=$OPTARG;;
    r) ANTSRESAMPLE=$OPTARG;;
    s) SEG=$OPTARG;;
    w) WDIR=$OPTARG;;
    n) PREFIX=$OPTARG;;
    a) INITMAT=$OPTARG;;
    q) REGDIM=$OPTARG;;
    i) ANTSITER=$OPTARG;;
    d) set -x -e;;
    h) usage ;;
    m) DEFMASK=1 ;;
    o) NOORIGINMATCH=1 ;;
    \?)
      exit -1;
      ;;
    :)
      echo "Option $OPTARG requires an argument";
      exit -1;
      ;;
  esac
done
# ANTS is the default registration program
# ${DEFREGPROG?} aborts the script here if DEFREGPROG was not exported.
echo "Deformable registration using ${DEFREGPROG?}"
# Hard-coded tool locations (user home directories on the original cluster).
BIN_ANTS=~srdas/bin/ants_avants
BIN_ANTSITK4=~srdas/bin
BIN_IRTK=~pauly/bin/itk
# Check the existence of image files
# Every required input must both be given and exist on disk.
if [[ -z $TP0 || ! -f $TP0 ]]; then
  echo "Baseline image is missing"
  exit -1;
elif [[ -z $TP1 || ! -f $TP1 ]]; then
  echo "Followup image is missing"
  exit -1;
elif [[ -z $DTP0 || ! -f $DTP0 ]]; then
  echo "Baseline refspace image is missing"
  exit -1;
elif [[ -z $DTP1 || ! -f $DTP1 ]]; then
  echo "Followup refspace image is missing"
  exit -1;
elif [[ -z $SEG || ! -f $SEG ]]; then
  echo "Baseline segmentation image is missing"
  exit -1;
#elif [[ -z $TET || ! -f $TET ]]; then
#  echo "Baseline tetrahedron mesh is missing"
#  exit -1;
elif [[ -z $PREFIX ]]; then
  echo "No prefix specified, use -n option"
  exit -1;
fi
# Create a working directory for this registration
if [ -z $WDIR ]; then
  WDIR="/tmp/`tempfile pmatlas_XXXXXX`"
fi
mkdir -p $WDIR
# Initialization required ?
# Without -a, fall back to the 4x4 identity as the initial global transform.
if [ -z $INITMAT ]; then
  get_identity_transform > $WDIR/identity.mat
  INITMAT=$WDIR/identity.mat
fi
# 2, 2.5 or 3 D registration ?
if [ -z $REGDIM ]; then
  REGDIM=3
else
  REGDIM=$REGDIM
fi
# Modality ?
if [ -z $MODALITY ]; then
  MODALITY=TSE
else
  MODALITY=$MODALITY
fi
# Superesolution sampling or not
if [ -z $ANTSRESAMPLE ]; then
  SUPERRES=""
else
  SUPERRES=" -resample $ANTSRESAMPLE"
fi
# With origin matching (default), keep the whole-brain transform and its
# inverse; with -o, the initial matrix is used directly as omRAS.
if [ -z $NOORIGINMATCH ]; then
  cp $INITMAT $WDIR/wbRAS.mat
  c3d_affine_tool $WDIR/wbRAS.mat -inv -o $WDIR/wbRAS_inv.mat
else
  cp $INITMAT $WDIR/omRAS.mat
fi
# This whole section is now done before global registration
:<<'TRIM'
# Create small images in baseline and followup image spaces copy this code to global registration script
c3d $TP0 -as BL $TP1 -as FU \
$SEG -trim 16mm -sdt -smooth 4mm -thresh 0 inf 1 0 -as M \
-push BL -push M -dilate 1 3x3x3mm -reslice-identity -trim 10mm -as SBL -o $WDIR/bltrimdef.nii.gz \
-push FU -push M -dilate 1 3x3x3mm -reslice-matrix $WDIR/wbRAS_inv.mat -trim 10mm -as SFU -o $WDIR/futrimdef.nii.gz \
-push SBL -push BL -int NN -reslice-identity -o $WDIR/bltrim.nii.gz \
-push SFU -push FU -int NN -reslice-identity -o $WDIR/futrim.nii.gz
# Check if mask contains the whole segmentation
maxdiff=`c3d $SEG -trim 16mm -thresh 1 inf 1 0 -as M $WDIR/../bltrimdef.nii.gz -push M -reslice-identity \
-trim 10mm -binarize -scale -1 \
-add -info-full | grep "Intensity Range" | sed -e 's/]//g' | awk -F ',' {'print $2'}`
if [ $maxdiff -lt 0 ]; then
echo "mask doesn't contain the whole segmentation"
exit -1;
fi
# Check if mask is genus zero
c3d $WDIR/../bltrimdef.nii.gz -connected-components -threshold 1 1 1 0 -dilate 1 2x2x2vox -pad 1x1x1vox 1x1x1vox 0 \
-o $WDIR/padded.nii.gz
genus=`CheckTopology $WDIR/padded.nii.gz | tail -1 | awk {'print $2'}`
if [ $genus != 0 ]; then
echo "mask is not a sphere"
exit -1;
fi
rm -f $WDIR/padded.nii.gz
TRIM
# -------------------------- work for deformable registration starts here ----------------------------------
# Symlink the trimmed inputs into the working directory under fixed names.
ln -sf $TP1 $WDIR/futrim.nii.gz
ln -sf $TP0 $WDIR/bltrim.nii.gz
ln -sf $DTP0 $WDIR/bltrimdef.nii.gz
ln -sf $DTP1 $WDIR/futrimdef.nii.gz
# Make the origins of the BL and FU images the same (this will make the
# rigid transform between then smaller, and will minimize ANTS-related issues)
BLORIG=($(c3d $WDIR/bltrim.nii.gz -info-full | head -n 3 | tail -n 1 | sed -e "s/.*{\[//" -e "s/\],.*//"))
c3d $WDIR/futrim.nii.gz -origin ${BLORIG[0]}x${BLORIG[1]}x${BLORIG[2]}mm -o $WDIR/futrim_om.nii.gz
# Recompute the transformation between the images
if [ -z $NOORIGINMATCH ]; then
c3d_affine_tool \
  -sform $WDIR/futrim_om.nii.gz \
  -sform $WDIR/futrim.nii.gz -inv \
  -mult $WDIR/wbRAS.mat -mult -o $WDIR/omRAS.mat
fi
# Why the following is wrong ? TODO
#c3d_affine_tool \
#  $WDIR/wbRAS.mat \
#  -sform $WDIR/futrim.nii.gz \
#  -sform $WDIR/futrim_om.nii.gz -inv \
#  -mult -mult -o $WDIR/omRAS.mat
# For any fixed image F, suppose we know the transform T1, between moving image M1 and F.
# Now, we have another moving image M2 which is the same as M1, except in a different physical space, related to
# M1 by a rigid transformation.
# And we want to find the transform T2, between moving image M2 and F.
# Voxel V in fixed image F should have its image at the same voxel locations in both moving images M1 and M2.
# Thus, we write, equating the two voxel locations of the image points:
# inv(sform(M1)) * T1 * sform(F) *V = inv(sform(M2)) * T2 * sform(F) *V
# Or
# T2 = sform(M2) * inv(sform(M1)) * T1
# Take the square root of the mapping. This brings moving to half-way point
c3d_affine_tool $WDIR/omRAS.mat -oitk $WDIR/omRAS_itk.txt
c3d_affine_tool $WDIR/omRAS.mat -sqrt -o $WDIR/omRAS_half.mat -oitk $WDIR/omRAS_half_itk.txt
c3d_affine_tool $WDIR/omRAS_half.mat -inv -o $WDIR/omRAS_halfinv.mat -oitk $WDIR/omRAS_half_inv_itk.txt
c3d_affine_tool $WDIR/omRAS.mat -inv -o $WDIR/omRAS_inv.mat -oitk $WDIR/omRAS_inv_itk.txt
# Create the halfway reference space
c3d_affine_tool -sform $WDIR/futrim_om.nii.gz -sform $WDIR/bltrim.nii.gz -inv -mult -sqrt -sform $WDIR/bltrim.nii.gz -mult -o $WDIR/hwtrimspace.mat
# resample to neutral space - incorporates initialization and subsequent flirt between T2 images
# generate trimmed images
c3d $WDIR/bltrim.nii.gz \
  -o $WDIR/bltrim.mha \
  -set-sform $WDIR/hwtrimspace.mat \
  $WDIR/bltrimdef.nii.gz -dilate 1 5x5x5mm -reslice-matrix $WDIR/omRAS_halfinv.mat -trim 10mm -o $WDIR/hwtrimdef.nii.gz \
  -o $WDIR/hwtrimdef.mha \
  $WDIR/hwtrimdef.nii.gz $WDIR/bltrim.nii.gz -reslice-matrix $WDIR/omRAS_halfinv.mat -o $WDIR/bltrim_to_hw.nii.gz \
  -o $WDIR/bltrim_to_hw.mha \
  $WDIR/hwtrimdef.nii.gz $WDIR/futrim_om.nii.gz -reslice-matrix $WDIR/omRAS_half.mat -o $WDIR/futrim_om_to_hw.nii.gz \
  -o $WDIR/futrim_om_to_hw.mha
# The .mha intermediates are only side-products of the pipeline above.
rm -f $WDIR/bltrim.mha $WDIR/hwtrimdef.mha $WDIR/bltrim_to_hw.mha $WDIR/futrim_om_to_hw.mha
# Check if halfway mask contains the whole segmentation
maxdiff=`c3d $SEG -trim 16mm -thresh 1 inf 1 0 -as M $WDIR/hwtrimdef.nii.gz -push M -reslice-matrix \
  $WDIR/omRAS_halfinv.mat -trim 10mm -binarize -scale -1 \
  -add -info-full | grep "Intensity Range" | sed -e 's/]//g' | awk -F ',' {'print $2'}`
if [ $maxdiff -lt 0 ]; then
  echo "halfway mask doesn't contain the whole segmentation"
  exit -1;
fi
# Check if halfway mask is genus zero
c3d $WDIR/hwtrimdef.nii.gz -connected-components -threshold 1 1 1 0 -dilate 1 2x2x2vox -pad 1x1x1vox 1x1x1vox 0 \
  -o $WDIR/padded.nii.gz
genus=`CheckTopology $WDIR/padded.nii.gz | tail -1 | awk {'print $2'}`
if [ $genus != 0 ]; then
  echo "halfway mask is not a sphere"
  exit -1;
fi
rm -f $WDIR/padded.nii.gz
# ANTSITER=0
# Run ANTS over the masked region (if there are more than 0 iterations)
# Per-tool output directories for the registration stages that follow.
mkdir -p $WDIR/ants
mkdir -p $WDIR/irtk
mkdir -p $WDIR/of
if [[ ${ANTSITER=100} > 0 ]]; then
if [ "$MODALITY" == "TSE" ]; then
REGUL="0.8,0.2"
else
REGUL="2.0,0.5"
REGUL="3.0,0"
fi
if [ ${RIGIDMODE?} = "BL" ]; then
# Run one of three different kinds of deformable registration
case $REGDIM in
"3") echo "performing 3D deformable registration";
ANTSITER=50x50x100;
#ANTSITER=100;
# Use mask or not
if [ -z $DEFMASK ]; then
maskopt=""
else
maskopt="-x $WDIR/bltrimdef.nii.gz"
fi
c3d $WDIR/bltrim.nii.gz $WDIR/futrim_om.nii.gz -reslice-itk $WDIR/omRAS_itk.txt -o $WDIR/futrim_om_resliced_to_bltrim.nii.gz
if [ ${DEFREGPROG} = "ants" ]; then
rm -f $WDIR/ants/ants_output_3d.txt
# Execute ANTS with special function
$BIN_ANTS/ANTS 3 $maskopt \
-m PR[$WDIR/bltrim.nii.gz,$WDIR/futrim_om_resliced_to_bltrim.nii.gz,1,4] \
-o $WDIR/ants/antsreg3d.nii.gz \
-i $ANTSITER \
-v -t SyN[${ASTEPSIZE?}] -r $REGUL \
--continue-affine false | tee $WDIR/ants/ants_output_3d.txt;
elif [ ${DEFREGPROG} = "irtk" ]; then
# Get the native resolution
NATIVERES=$(c3d $WDIR/bltrim_to_hw.nii.gz -info-full | grep "Voxel Spacing" | sed -e "s/.*\[//" -e "s/,//g" -e "s/\]//");
setup_irtk;
$BIN_IRTK/nreg $WDIR/bltrim_to_hw.nii.gz $WDIR/futrim_om_to_hw.nii.gz \
-parin $WDIR/irtk/nreg.param \
-dofout $WDIR/irtk/nreg_3d.dof -parout $WDIR/irtk/nreg_out.param | tee $WDIR/irtk/irtk_output_3d.txt;
# Extract the warps as images
$BIN_IRTK/dof2image $WDIR/hwtrimdef.nii.gz $WDIR/irtk/nreg_3d.dof \
$WDIR/irtk/irtkreg3dWarpxvec.nii.gz \
$WDIR/irtk/irtkreg3dWarpyvec.nii.gz \
$WDIR/irtk/irtkreg3dWarpzvec.nii.gz;
else
echo "Unknown deformable registration program";
exit -1;
fi
# Warp followup to baseline
$BIN_ANTS/WarpImageMultiTransform 3 $WDIR/futrim_om.nii.gz $WDIR/futrim_om_to_bltrim_warped_3d.nii.gz \
-R $WDIR/bltrimdef.nii.gz $WDIR/${DEFREGPROG}/${DEFREGPROG}reg3dWarp.nii.gz $WDIR/omRAS_itk.txt;
c3d $WDIR/${DEFREGPROG}/${DEFREGPROG}reg3dWarp?vec.nii.gz -omc 3 $WDIR/${DEFREGPROG}/${DEFREGPROG}reg3dWarp.mha;
c3d $WDIR/bltrim.nii.gz $WDIR/futrim_om_resliced_to_bltrim.nii.gz -scale -1 -add -o $WDIR/regdiffbefore_3d.nii.gz
c3d $WDIR/bltrim.nii.gz $WDIR/futrim_om_to_hw_warped_3d.nii.gz -histmatch 3 \
$WDIR/bltrim.nii.gz -scale -1 -add -o $WDIR/regdiffafter_3d.nii.gz
echo before `c3d $WDIR/bltrim.nii.gz $WDIR/futrim_om_resliced_to_bltrim.nii.gz -ncor` > $WDIR/regncor_3d.txt
echo after `c3d $WDIR/bltrim.nii.gz $WDIR/futrim_om_to_bltrim_warped_3d.nii.gz -ncor` >> $WDIR/regncor_3d.txt
;;
"2.5") echo "performing 2.5D deformable registration, restrict deformation in z******RIGIDMODE BL not implemented****";
ANTSITER=50x50x100;
#ANTSITER=100;
# Use mask or not
if [ -z $DEFMASK ]; then
maskopt=""
else
maskopt="-x $WDIR/hwtrimdef.nii.gz"
fi
rm -f $WDIR/ants/ants_output_2.5d.txt
$BIN_ANTS/ANTS 3 $maskopt \
-m PR[$WDIR/bltrim_to_hw.nii.gz,$WDIR/futrim_om_to_hw.nii.gz,1,4] \
-o $WDIR/ants/antsreg2.5d.nii.gz \
-i $ANTSITER \
-v -t SyN[${ASTEPSIZE}] -r $REGUL \
--Restrict-Deformation 1x1x0 \
--continue-affine false | tee $WDIR/ants/ants_output_2.5d.txt;
# Warp followup to baseline and to halfway space
$BIN_ANTS/WarpImageMultiTransform 3 $WDIR/futrim_om.nii.gz $WDIR/futrim_om_to_bltrim_warped_2.5d.nii.gz \
-R $WDIR/bltrimdef.nii.gz $WDIR/omRAS_half_itk.txt $WDIR/ants/antsreg2.5dWarp.nii.gz $WDIR/omRAS_half_itk.txt;
$BIN_ANTS/WarpImageMultiTransform 3 $WDIR/futrim_om.nii.gz $WDIR/futrim_om_to_hw_warped_2.5d.nii.gz \
-R $WDIR/hwtrimdef.nii.gz $WDIR/ants/antsreg2.5dWarp.nii.gz $WDIR/omRAS_half_itk.txt;
c3d $WDIR/ants/antsreg2.5dWarp?vec.nii.gz -omc 3 $WDIR/ants/antsreg2.5dWarp.mha;
c3d $WDIR/bltrim_to_hw.nii.gz $WDIR/futrim_om_to_hw.nii.gz -histmatch 3 \
$WDIR/bltrim_to_hw.nii.gz -scale -1 -add -o $WDIR/regdiffbefore_2.5d.nii.gz
c3d $WDIR/bltrim_to_hw.nii.gz $WDIR/futrim_om_to_hw_warped_2.5d.nii.gz -histmatch 3 \
$WDIR/bltrim_to_hw.nii.gz -scale -1 -add -o $WDIR/regdiffafter_2.5d.nii.gz
echo before `c3d $WDIR/bltrim_to_hw.nii.gz $WDIR/futrim_om_to_hw.nii.gz -ncor` > $WDIR/regncor_2.5d.txt
echo after `c3d $WDIR/bltrim_to_hw.nii.gz $WDIR/futrim_om_to_hw_warped_2.5d.nii.gz -ncor` >> $WDIR/regncor_2.5d.txt
;;
"2") echo "performing 2D deformable registration on corresponding z slices";
ANTSITER=50x50x300;
#ANTSITER=400;
zsize=`c3d $WDIR/bltrimdef.nii.gz -info | cut -f 1 -d ";" | cut -f 3 -d "," | sed -e 's/]//g' -e 's/ //g'`;
> $WDIR/regncor_2.txt
for ((i=0; i < ${zsize}; i++)) do
c3d $WDIR/bltrimdef.nii.gz -slice z $i -o $WDIR/bltrimdef_${i}.nii.gz;
c3d $WDIR/bltrim.nii.gz -slice z $i -o $WDIR/bltrim_${i}.nii.gz -o $WDIR/bltrim_${i}.mha;
c3d $WDIR/futrim_om_resliced_to_bltrim.nii.gz -slice z $i -o $WDIR/futrim_om_resliced_to_bltrim_${i}.nii.gz;
c2d $WDIR/bltrimdef_${i}.nii.gz $SUPERRES -o $WDIR/bltrimdef_${i}.nii.gz
c2d $WDIR/bltrim_${i}.nii.gz $SUPERRES -o $WDIR/bltrim_${i}.nii.gz
c2d $WDIR/futrim_om_resliced_to_bltrim_${i}.nii.gz $SUPERRES -o $WDIR/futrim_om_resliced_to_bltrim_${i}.nii.gz
# Use mask or not
if [ -z $DEFMASK ]; then
maskopt=""
else
maskopt="-x $WDIR/bltrimdef_${i}.nii.gz"
fi
rm -f $WDIR/ants/ants_output_${i}.txt
$BIN_ANTS/ANTS 2 $maskopt \
-m PR[$WDIR/bltrim_${i}.nii.gz,$WDIR/futrim_om_resliced_to_bltrim_${i}.nii.gz,1,4] \
-o $WDIR/ants/antsreg_${i}.nii.gz \
-i $ANTSITER \
-v -t SyN[${ASTEPSIZE?}] -r $REGUL \
--continue-affine false | tee $WDIR/ants/ants_output_${i}.txt;
# -v -t SyN[${ASTEPSIZE?}] -r $REGUL \
# -v -t SyN[0.1,5,0.001] --geodesic 2 -r $REGUL] \
# TODO handle properly. This is a terrible hack. When one image is empty, ANTS bails out with NaNs in energy
# without any warning. If this happens warp files are not generated.
if [ ! -f $WDIR/ants/antsreg_${i}Warpxvec.nii.gz ]; then
c3d $WDIR/bltrimdef_${i}.nii.gz -dup -scale -1 -add -o $WDIR/ants/antsreg_${i}Warpxvec.nii.gz
cp $WDIR/ants/antsreg_${i}Warpxvec.nii.gz $WDIR/ants/antsreg_${i}Warpyvec.nii.gz
cp $WDIR/ants/antsreg_${i}Warpxvec.nii.gz $WDIR/ants/antsreg_${i}InverseWarpxvec.nii.gz
cp $WDIR/ants/antsreg_${i}Warpyvec.nii.gz $WDIR/ants/antsreg_${i}InverseWarpyvec.nii.gz
get_2didentity_transform_itk > $WDIR/ants/antsreg_${i}Affine.txt
fi
c3d $WDIR/ants/antsreg_${i}Warpxvec.nii.gz -dup -scale -1 -add -o $WDIR/ants/antsreg_${i}Warpzvec.nii.gz
c3d $WDIR/ants/antsreg_${i}InverseWarpxvec.nii.gz -dup -scale -1 -add -o $WDIR/ants/antsreg_${i}InverseWarpzvec.nii.gz
$BIN_ANTS/WarpImageMultiTransform 2 $WDIR/futrim_om_resliced_to_bltrim_${i}.nii.gz $WDIR/futrim_om_to_bltrim_warped_${i}.nii.gz \
-R $WDIR/bltrimdef_${i}.nii.gz $WDIR/ants/antsreg_${i}Warp.nii.gz $WDIR/ants/antsreg_${i}Affine.txt;
c3d $WDIR/ants/antsreg_${i}Warp?vec.nii.gz -omc 3 $WDIR/ants/antsreg_${i}Warp.mha;
c2d $WDIR/bltrim_${i}.nii.gz $WDIR/futrim_om_resliced_to_bltrim_${i}.nii.gz \
-histmatch 2 $WDIR/bltrim_${i}.nii.gz -scale -1 -add -o $WDIR/regdiffbefore_${i}.nii.gz
c2d $WDIR/bltrim_${i}.nii.gz $WDIR/futrim_om_to_bltrim_warped_${i}.nii.gz \
-histmatch 2 $WDIR/bltrim_${i}.nii.gz -scale -1 -add -o $WDIR/regdiffafter_${i}.nii.gz
echo ${i} before `c2d $WDIR/bltrim_${i}.nii.gz $WDIR/futrim_om_resliced_to_bltrim_${i}.nii.gz -ncor` >> $WDIR/regcor_2.txt
echo ${i} after `c2d $WDIR/bltrim_${i}.nii.gz $WDIR/futrim_om_to_bltrim_warped_${i}.nii.gz -ncor` >> $WDIR/regcor_2.txt
done
;;
*) echo "Unknown deformable registration option";
exit -1;
;;
esac
# Move the tetmesh to the halfway point
#warpmesh $TET $WDIR/tet2hw.vtk $WDIR/omRAS_half.mat
# Apply the warp to the mesh
#warpmesh -w ants $WDIR/tet2hw.vtk $WDIR/tetwarp.vtk $WDIR/ants/antsregWarp?vec.nii.gz
# Apply the second half of the rigid transform to the mesh
#warpmesh $WDIR/tetwarp.vtk ${PREFIX}_tetmesh.vtk $WDIR/omRAS_half.mat
elif [ ${RIGIDMODE?} = "HW" ]; then
# Run one of three different kinds of deformable registration
case $REGDIM in
"3") echo "performing 3D deformable registration";
ANTSITER=100x100x100x1;
#ANTSITER=100;
ASTEPSIZE=0.5;
# Use mask or not
if [ -z $DEFMASK ]; then
maskopt=""
else
maskopt="--masks [$WDIR/hwtrimdef.nii.gz, $WDIR/hwtrimdef.nii.gz]"
fi
if [ ${DEFREGPROG} = "ants" ]; then
rm -f $WDIR/ants/ants_output_3d.txt
# Execute ANTS with special function
# $BIN_ANTS/ANTS 3 $maskopt \
# -m PR[$WDIR/bltrim_to_hw.nii.gz,$WDIR/futrim_om_to_hw.nii.gz,1,4] \
# -o $WDIR/ants/antsreg3d.nii.gz \
# -i $ANTSITER \
# -v -t SyN[${ASTEPSIZE?}] -r $REGUL \
# --continue-affine false | tee $WDIR/ants/ants_output_3d.txt;
$BIN_ANTSITK4/antsRegistration --dimensionality 3 $maskopt \
-m CC[$WDIR/bltrim_to_hw.nii.gz,$WDIR/futrim_om_to_hw.nii.gz,1,4] \
-o [ $WDIR/ants/antsreg3d , ${WDIR}/futrim_om_to_hw_warped_3d_ITKv4.nii.gz ] \
-c [ $ANTSITER, 0, 20 ] \
-t SyN[ ${ASTEPSIZE?} , $REGUL ] \
-s 3x2x1x0 -f 6x4x2x1 | tee $WDIR/ants/ants_output_3d.txt;
# $BIN_ANTS/ANTS_unbiased 3 $maskopt \
# -m PR[$WDIR/bltrim.nii.gz,$WDIR/futrim_om.nii.gz,1,4] \
# -o $WDIR/ants/antsreg3d.nii.gz \
# -i $ANTSITER \
# -F $WDIR/omRAS_half_inv_itk.txt \
# -a $WDIR/omRAS_half_itk.txt \
# --fixed-image-initial-affine-ref-image $WDIR/hwtrimdef.nii.gz \
# -v -t SyN[${ASTEPSIZE?}] -r $REGUL \
# --continue-affine false | tee $WDIR/ants/ants_output_3d.txt;
# -m PR[$WDIR/bltrim_to_hw.nii.gz,$WDIR/futrim_om_to_hw.nii.gz,1,4] \ # PR metric
# -m MI[$WDIR/bltrim_to_hw.nii.gz,$WDIR/futrim_om_to_hw.nii.gz,1,32] \ # MI metric
# -v -t SyN[${ASTEPSIZE?}] -r $REGUL \ # For TSE, Gauss[0.5,0.2]
# -v -t SyN[${ASTEPSIZE?}] -r $REGUL \ # For MPRAGE, Gauss[2.0,0.5]
# -v -t SyN[0.25,5,0.1] --geodesic 2 -r Gauss[0.5, 0.2] \
elif [ ${DEFREGPROG} = "irtk" ]; then
# Get the native resolution
NATIVERES=$(c3d $WDIR/bltrim_to_hw.nii.gz -info-full | grep "Voxel Spacing" | sed -e "s/.*\[//" -e "s/,//g" -e "s/\]//");
setup_irtk;
$BIN_IRTK/nreg $WDIR/bltrim_to_hw.nii.gz $WDIR/futrim_om_to_hw.nii.gz \
-parin $WDIR/irtk/nreg.param \
-dofout $WDIR/irtk/nreg_3d.dof -parout $WDIR/irtk/nreg_out.param | tee $WDIR/irtk/irtk_output_3d.txt;
# Extract the warps as images
$BIN_IRTK/dof2image $WDIR/hwtrimdef.nii.gz $WDIR/irtk/nreg_3d.dof \
$WDIR/irtk/irtkreg3dWarpxvec.nii.gz \
$WDIR/irtk/irtkreg3dWarpyvec.nii.gz \
$WDIR/irtk/irtkreg3dWarpzvec.nii.gz;
else
echo "Unknown deformable registration program";
exit -1;
fi
# Split multicomponent Warps
c3d -mcs $WDIR/ants/antsreg3d0Warp.nii.gz -oo $WDIR/ants/antsreg3dWarpxvec.nii.gz $WDIR/ants/antsreg3dWarpyvec.nii.gz $WDIR/ants/antsreg3dWarpzvec.nii.gz
c3d -mcs $WDIR/ants/antsreg3d0InverseWarp.nii.gz -oo $WDIR/ants/antsreg3dInverseWarpxvec.nii.gz $WDIR/ants/antsreg3dInverseWarpyvec.nii.gz $WDIR/ants/antsreg3dInverseWarpzvec.nii.gz
# Warp followup to baseline
$BIN_ANTS/WarpImageMultiTransform 3 $WDIR/futrim_om.nii.gz $WDIR/futrim_om_to_bltrim_warped_3d.nii.gz \
-R $WDIR/bltrimdef.nii.gz $WDIR/omRAS_half_itk.txt $WDIR/${DEFREGPROG}/${DEFREGPROG}reg3dWarp.nii.gz $WDIR/omRAS_half_itk.txt;
$BIN_ANTS/WarpImageMultiTransform 3 $WDIR/futrim_om.nii.gz $WDIR/futrim_om_to_hw_warped_3d.nii.gz \
-R $WDIR/hwtrimdef.nii.gz $WDIR/${DEFREGPROG}/${DEFREGPROG}reg3dWarp.nii.gz $WDIR/omRAS_half_itk.txt;
# c3d $WDIR/${DEFREGPROG}/${DEFREGPROG}reg3dWarp?vec.nii.gz -omc 3 $WDIR/${DEFREGPROG}/${DEFREGPROG}reg3dWarp.mha;
# c3d $WDIR/bltrim_to_hw.nii.gz $WDIR/futrim_om_to_hw.nii.gz -histmatch 3 \
# $WDIR/bltrim_to_hw.nii.gz -scale -1 -add -o $WDIR/regdiffbefore_3d.nii.gz
# c3d $WDIR/bltrim_to_hw.nii.gz $WDIR/futrim_om_to_hw_warped_3d.nii.gz -histmatch 3 \
# $WDIR/bltrim_to_hw.nii.gz -scale -1 -add -o $WDIR/regdiffafter_3d.nii.gz
echo before `c3d $WDIR/bltrim_to_hw.nii.gz $WDIR/futrim_om_to_hw.nii.gz -ncor` > $WDIR/regncor_3d.txt
echo after `c3d $WDIR/bltrim_to_hw.nii.gz $WDIR/futrim_om_to_hw_warped_3d.nii.gz -ncor` >> $WDIR/regncor_3d.txt
;;
"2.5") echo "performing 2.5D deformable registration, restrict deformation in z";
ANTSITER=50x50x100;
#ANTSITER=100;
# Use mask or not
if [ -z $DEFMASK ]; then
maskopt=""
else
maskopt="-x $WDIR/hwtrimdef.nii.gz"
fi
rm -f $WDIR/ants/ants_output_2.5d.txt
$BIN_ANTS/ANTS 3 $maskopt \
-m PR[$WDIR/bltrim_to_hw.nii.gz,$WDIR/futrim_om_to_hw.nii.gz,1,4] \
-o $WDIR/ants/antsreg2.5d.nii.gz \
-i $ANTSITER \
-v -t SyN[${ASTEPSIZE}] -r $REGUL \
--Restrict-Deformation 1x1x0 \
--continue-affine false | tee $WDIR/ants/ants_output_2.5d.txt;
# Warp followup to baseline and to halfway space
$BIN_ANTS/WarpImageMultiTransform 3 $WDIR/futrim_om.nii.gz $WDIR/futrim_om_to_bltrim_warped_2.5d.nii.gz \
-R $WDIR/bltrimdef.nii.gz $WDIR/omRAS_half_itk.txt $WDIR/ants/antsreg2.5dWarp.nii.gz $WDIR/omRAS_half_itk.txt;
$BIN_ANTS/WarpImageMultiTransform 3 $WDIR/futrim_om.nii.gz $WDIR/futrim_om_to_hw_warped_2.5d.nii.gz \
-R $WDIR/hwtrimdef.nii.gz $WDIR/ants/antsreg2.5dWarp.nii.gz $WDIR/omRAS_half_itk.txt;
c3d $WDIR/ants/antsreg2.5dWarp?vec.nii.gz -omc 3 $WDIR/ants/antsreg2.5dWarp.mha;
c3d $WDIR/bltrim_to_hw.nii.gz $WDIR/futrim_om_to_hw.nii.gz -histmatch 3 \
$WDIR/bltrim_to_hw.nii.gz -scale -1 -add -o $WDIR/regdiffbefore_2.5d.nii.gz
c3d $WDIR/bltrim_to_hw.nii.gz $WDIR/futrim_om_to_hw_warped_2.5d.nii.gz -histmatch 3 \
$WDIR/bltrim_to_hw.nii.gz -scale -1 -add -o $WDIR/regdiffafter_2.5d.nii.gz
echo before `c3d $WDIR/bltrim_to_hw.nii.gz $WDIR/futrim_om_to_hw.nii.gz -ncor` > $WDIR/regncor_2.5d.txt
echo after `c3d $WDIR/bltrim_to_hw.nii.gz $WDIR/futrim_om_to_hw_warped_2.5d.nii.gz -ncor` >> $WDIR/regncor_2.5d.txt
;;
"2") echo "performing 2D deformable registration on corresponding z slices";
ANTSITER=50x50x300;
#ANTSITER=400;
zsize=`c3d $WDIR/hwtrimdef.nii.gz -info | cut -f 1 -d ";" | cut -f 3 -d "," | sed -e 's/]//g' -e 's/ //g'`;
> $WDIR/regncor_2.txt
for ((i=0; i < ${zsize}; i++)) do
c3d $WDIR/hwtrimdef.nii.gz -slice z $i -o $WDIR/hwtrimdef_${i}.nii.gz;
c3d $WDIR/bltrim_to_hw.nii.gz -slice z $i -o $WDIR/bltrim_to_hw_${i}.nii.gz -o $WDIR/bltrim_to_hw_${i}.mha;
c3d $WDIR/futrim_om_to_hw.nii.gz -slice z $i -o $WDIR/futrim_om_to_hw_${i}.nii.gz -o $WDIR/futrim_om_to_hw_${i}.mha;
c2d $WDIR/hwtrimdef_${i}.nii.gz $SUPERRES -o $WDIR/hwtrimdef_${i}.nii.gz
c2d $WDIR/bltrim_to_hw_${i}.nii.gz $SUPERRES -o $WDIR/bltrim_to_hw_${i}.nii.gz
c2d $WDIR/futrim_om_to_hw_${i}.nii.gz $SUPERRES -o $WDIR/futrim_om_to_hw_${i}.nii.gz
# Use mask or not
if [ -z $DEFMASK ]; then
maskopt=""
else
maskopt="-x $WDIR/hwtrimdef_${i}.nii.gz"
fi
if [ ${DEFREGPROG} = "ants" ]; then
rm -f $WDIR/ants/ants_output_${i}.txt
$BIN_ANTS/ANTS 2 $maskopt \
-m PR[$WDIR/bltrim_to_hw_${i}.nii.gz,$WDIR/futrim_om_to_hw_${i}.nii.gz,1,4] \
-o $WDIR/ants/antsreg_${i}.nii.gz \
-i $ANTSITER \
-v -t SyN[${ASTEPSIZE?}] -r $REGUL \
--continue-affine false | tee $WDIR/ants/ants_output_${i}.txt;
# -v -t SyN[${ASTEPSIZE?}] -r $REGUL \
# -v -t SyN[0.1,5,0.001] --geodesic 2 -r $REGUL] \
# TODO handle properly. This is a terrible hack. When one image is empty, ANTS bails out with NaNs in energy
# without any warning. If this happens warp files are not generated.
if [ ! -f $WDIR/ants/antsreg_${i}Warpxvec.nii.gz ]; then
c3d $WDIR/hwtrimdef_${i}.nii.gz -dup -scale -1 -add -o $WDIR/ants/antsreg_${i}Warpxvec.nii.gz
cp $WDIR/ants/antsreg_${i}Warpxvec.nii.gz $WDIR/ants/antsreg_${i}Warpyvec.nii.gz
cp $WDIR/ants/antsreg_${i}Warpxvec.nii.gz $WDIR/ants/antsreg_${i}InverseWarpxvec.nii.gz
cp $WDIR/ants/antsreg_${i}Warpyvec.nii.gz $WDIR/ants/antsreg_${i}InverseWarpyvec.nii.gz
get_2didentity_transform_itk > $WDIR/ants/antsreg_${i}Affine.txt
fi
elif [ ${DEFREGPROG} = "of" ]; then
:<<'NOOF'
/home/local/matlab_r2009b/bin/matlab -singleCompThread -nodisplay <<MAT2
mypwd=pwd;
cd('${WDIR}/of');
vacind_long_opticalflow('${WDIR}/bltrim_to_hw_${i}.nii','${WDIR}/futrim_om_to_hw_${i}.nii');
cd(mypwd);
MAT2
mv ${WDIR}/of/of_Warpxvec.nii ${WDIR}/of/ofreg_${i}Warpxvec.nii
mv ${WDIR}/of/of_Warpyvec.nii ${WDIR}/of/ofreg_${i}Warpyvec.nii
NOOF
xvox=`c2d ${WDIR}/bltrim_to_hw_${i}.nii.gz -info-full |grep "Voxel Spacing" | cut -f 2 -d "[" | cut -f 1 -d "]" | cut -f 1 -d ","`
c2d ${WDIR}/of/ofreg_${i}Warpxvec.nii.gz -scale $xvox -o ${WDIR}/of/ofreg_${i}Warpxvec.nii.gz
yvox=`c2d ${WDIR}/bltrim_to_hw_${i}.nii.gz -info-full |grep "Voxel Spacing" | cut -f 2 -d "[" | cut -f 1 -d "]" | cut -f 2 -d ","`
c2d ${WDIR}/of/ofreg_${i}Warpyvec.nii.gz -scale $yvox -o ${WDIR}/of/ofreg_${i}Warpyvec.nii.gz
# gzip -f ${WDIR}/of/ofreg_${i}Warp?vec.nii
get_2didentity_transform_itk > $WDIR/of/ofreg_${i}Affine.txt
else
echo "Unknown deformable registration program";
exit -1;
fi
c3d $WDIR/${DEFREGPROG}/${DEFREGPROG}reg_${i}Warpxvec.nii.gz -dup -scale -1 -add -o $WDIR/${DEFREGPROG}/${DEFREGPROG}reg_${i}Warpzvec.nii.gz
if [ ${DEFREGPROG} = "ants" ]; then
c3d $WDIR/${DEFREGPROG}/${DEFREGPROG}reg_${i}InverseWarpxvec.nii.gz -dup -scale -1 -add -o $WDIR/${DEFREGPROG}/${DEFREGPROG}reg_${i}InverseWarpzvec.nii.gz
fi
$BIN_ANTS/WarpImageMultiTransform 2 $WDIR/futrim_om_to_hw_${i}.nii.gz $WDIR/futrim_om_to_hw_warped_${i}.nii.gz \
-R $WDIR/hwtrimdef_${i}.nii.gz $WDIR/${DEFREGPROG}/${DEFREGPROG}reg_${i}Warp.nii.gz $WDIR/${DEFREGPROG}/${DEFREGPROG}reg_${i}Affine.txt;
if [ ${DEFREGPROG} = "ants" ]; then
$BIN_ANTS/WarpImageMultiTransform 2 $WDIR/bltrim_to_hw_${i}.nii.gz $WDIR/bltrim_to_hw_warped_${i}.nii.gz \
-R $WDIR/hwtrimdef_${i}.nii.gz -i $WDIR/ants/antsreg_${i}Affine.txt $WDIR/ants/antsreg_${i}InverseWarp.nii.gz;
fi
c3d $WDIR/${DEFREGPROG}/${DEFREGPROG}reg_${i}Warp?vec.nii.gz -omc 3 $WDIR/${DEFREGPROG}/${DEFREGPROG}reg_${i}Warp.mha;
c2d $WDIR/bltrim_to_hw_${i}.nii.gz $WDIR/futrim_om_to_hw_${i}.nii.gz \
-histmatch 2 $WDIR/bltrim_to_hw_${i}.nii.gz -scale -1 -add -o $WDIR/regdiffbefore_${i}.nii.gz
c2d $WDIR/bltrim_to_hw_${i}.nii.gz $WDIR/futrim_om_to_hw_warped_${i}.nii.gz \
-histmatch 2 $WDIR/bltrim_to_hw_${i}.nii.gz -scale -1 -add -o $WDIR/regdiffafter_${i}.nii.gz
echo ${i} before `c2d $WDIR/bltrim_to_hw_${i}.nii.gz $WDIR/futrim_om_to_hw_${i}.nii.gz -ncor` >> $WDIR/regncor_2.txt
echo ${i} after `c2d $WDIR/bltrim_to_hw_${i}.nii.gz $WDIR/futrim_om_to_hw_warped_${i}.nii.gz -ncor` >> $WDIR/regncor_2.txt
done
;;
*) echo "Unknown deformable registration option";
exit -1;
;;
esac
# Move the tetmesh to the halfway point
#warpmesh $TET $WDIR/tet2hw.vtk $WDIR/omRAS_half.mat
# Apply the warp to the mesh
#warpmesh -w ants $WDIR/tet2hw.vtk $WDIR/tetwarp.vtk $WDIR/ants/antsregWarp?vec.nii.gz
# Apply the second half of the rigid transform to the mesh
#warpmesh $WDIR/tetwarp.vtk ${PREFIX}_tetmesh.vtk $WDIR/omRAS_half.mat
else
# Execute ANTS with special function
/home/srdas/bin/ants_avants/ANTS 3 \
-x $WDIR/bltrimdef.nii.gz \
-m PR[$WDIR/bltrim.nii.gz,$WDIR/futrim_om.nii.gz,1,4] \
-a $WDIR/omRAS_itk.txt \
-o $WDIR/ants/antsreg.nii.gz \
-i $ANTSITER \
-v -t SyN[${ASTEPSIZE?}] -r Gauss[2.0] \
--continue-affine false | tee $WDIR/ants/ants_output.txt
# Apply the warp to the mesh
warpmesh -w ants $TET $WDIR/tetwarp.vtk $WDIR/ants/antsregWarp?vec.nii.gz
# Apply the second half of the rigid transform to the mesh
warpmesh $WDIR/tetwarp.vtk ${PREFIX}_tetmesh.vtk $WDIR/omRAS.mat
fi
# Measure the Jacobian between the meshes
#tetjac $TET ${PREFIX}_tetmesh.vtk ${PREFIX}_tetjac.vtk \
# | grep VOLUME_STATS | cut -f 3 -d ' ' > ${PREFIX}_tetvol.txt
else
# Just transform the mesh using FLIRT transform
#warpmesh $TET ${PREFIX}_tetmesh.vtk $WDIR/wbRAS.mat
echo "Not doing deformable registration"
fi
|
sandhitsu/aloha
|
scripts/vacind_long_deformable_ITKv4_wb.sh
|
Shell
|
gpl-3.0
| 34,078 |
#!/bin/bash
# Lower the media player's volume by one step: forward the "volumedown"
# action to omxplayer over D-Bus via the kiosk's dbuscontrol.sh helper.
# Usage of the helper: dbuscontrol.sh <action>
readonly DBUS_HELPER=/media/PiShare/Git/kiosk_JerrieMockWorld/dbuscontrol.sh
"$DBUS_HELPER" volumedown
# Legacy FIFO-based control path, kept for reference:
#echo -n "-" > /bin/omxfifo
exit 0
|
samhlabs/kiosk_JerrieMockWorld
|
volumeDown.sh
|
Shell
|
gpl-3.0
| 205 |
#!/system/bin/sh
# Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Read the SoC/board name published by the bootloader; every tuning
# block in this script is keyed off this value.
target=`getprop ro.board.platform`
# Legacy 7xxx/8x50 targets: select the ondemand cpufreq governor on CPU0
# and only scale up once load crosses 90%.
case "$target" in
    "msm7201a_ffa" | "msm7201a_surf" | "msm7627_ffa" | "msm7627_6x" | "msm7627a" | "msm7627_surf" | \
"qsd8250_surf" | "qsd8250_ffa" | "msm7630_surf" | "msm7630_1x" | "msm7630_fusion" | "qsd8650a_st1x")
        echo "ondemand" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
        echo 90 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
        ;;
esac
# msm7201a only: slow the ondemand load-sampling interval to 500 ms.
case "$target" in
    "msm7201a_ffa" | "msm7201a_surf")
        echo 500000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
        ;;
esac
# msm7630: 75 ms sampling, and set pm2 idle_sleep_mode to 1
# (NOTE(review): appears to select a low-power idle mode — confirm
# against the pm2 driver).
case "$target" in
    "msm7630_surf" | "msm7630_1x" | "msm7630_fusion")
        echo 75000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
        echo 1 > /sys/module/pm2/parameters/idle_sleep_mode
        ;;
esac
# Floor CPU0 at 245760 kHz on these targets.
case "$target" in
    "msm7201a_ffa" | "msm7201a_surf" | "msm7627_ffa" | "msm7627_6x" | "msm7627_surf" | "msm7630_surf" | "msm7630_1x" | "msm7630_fusion" | "msm7627a" )
        echo 245760 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
        ;;
esac
# msm8660 (dual-core): enable RPM low-power resources and per-core power
# collapse, run both cores on ondemand, floor them at 384 MHz, and hand
# the scaling/hotplug sysfs nodes to the system UID.
case "$target" in
    "msm8660")
        # RPM-managed resources allowed into low-power states
        # (vdd_dig/vdd_mem use level 2 here, unlike the other targets).
        echo 1 > /sys/module/rpm_resources/enable_low_power/L2_cache
        echo 1 > /sys/module/rpm_resources/enable_low_power/pxo
        echo 2 > /sys/module/rpm_resources/enable_low_power/vdd_dig
        echo 2 > /sys/module/rpm_resources/enable_low_power/vdd_mem
        echo 1 > /sys/module/rpm_resources/enable_low_power/rpm_cpu
        # Full and standalone power collapse for both cores, in suspend
        # and in idle.
        echo 1 > /sys/module/msm_pm/modes/cpu0/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu0/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu0/power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu0/standalone_power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/standalone_power_collapse/idle_enabled
        # ondemand governor tuning for both cores.
        echo "ondemand" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
        echo "ondemand" > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
        echo 50000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
        echo 90 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
        echo 1 > /sys/devices/system/cpu/cpufreq/ondemand/io_is_busy
        echo 4 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_down_factor
        # 384 MHz minimum frequency for both cores.
        echo 384000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
        echo 384000 > /sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq
        # Let the system UID adjust scaling limits at runtime.
        chown -h system /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
        chown -h system /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
        chown -h system /sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq
        chown -h system /sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq
        # NOTE(review): "root.system" uses the legacy '.' owner:group
        # separator accepted by toolbox chown.
        chown -h root.system /sys/devices/system/cpu/mfreq
        chmod -h 220 /sys/devices/system/cpu/mfreq
        chown -h root.system /sys/devices/system/cpu/cpu1/online
        chmod -h 664 /sys/devices/system/cpu/cpu1/online
        ;;
esac
# msm8960 (quad-core): enable RPM/PM low-power modes, bring all four
# cores online under the ondemand governor, and program per-core CPU
# DCVS, GPU DCVS and msm_mpdecision (core offlining) parameters.
case "$target" in
    "msm8960")
        # RPM-managed resources allowed into low-power states.
        echo 1 > /sys/module/rpm_resources/enable_low_power/L2_cache
        echo 1 > /sys/module/rpm_resources/enable_low_power/pxo
        echo 1 > /sys/module/rpm_resources/enable_low_power/vdd_dig
        echo 1 > /sys/module/rpm_resources/enable_low_power/vdd_mem
        echo 1 > /sys/module/msm_pm/modes/cpu0/retention/idle_enabled
        # Full and standalone power collapse for all cores, in suspend
        # and in idle.
        echo 1 > /sys/module/msm_pm/modes/cpu0/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu2/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu3/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu0/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu2/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu3/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu0/standalone_power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/standalone_power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu2/standalone_power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu3/standalone_power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu0/power_collapse/idle_enabled
        # Temporarily disable thermal core control so the hotplug and
        # frequency writes below are not overridden; re-enabled after
        # the min-frequency writes.
        echo 0 > /sys/module/msm_thermal/core_control/enabled
        echo 1 > /sys/devices/system/cpu/cpu1/online
        echo 1 > /sys/devices/system/cpu/cpu2/online
        echo 1 > /sys/devices/system/cpu/cpu3/online
        # ondemand governor on all cores with multi-core-aware tunables.
        echo "ondemand" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
        echo "ondemand" > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
        echo "ondemand" > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
        echo "ondemand" > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
        echo 50000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
        echo 90 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
        echo 1 > /sys/devices/system/cpu/cpufreq/ondemand/io_is_busy
        echo 4 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_down_factor
        echo 10 > /sys/devices/system/cpu/cpufreq/ondemand/down_differential
        echo 70 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold_multi_core
        echo 3 > /sys/devices/system/cpu/cpufreq/ondemand/down_differential_multi_core
        echo 918000 > /sys/devices/system/cpu/cpufreq/ondemand/optimal_freq
        echo 1026000 > /sys/devices/system/cpu/cpufreq/ondemand/sync_freq
        echo 80 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold_any_cpu_load
        # Governor tunables the system UID may adjust at runtime.
        chown -h system /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
        chown -h system /sys/devices/system/cpu/cpufreq/ondemand/sampling_down_factor
        chown -h system /sys/devices/system/cpu/cpufreq/ondemand/io_is_busy
        # 384 MHz minimum frequency per core.
        echo 384000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
        echo 384000 > /sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq
        echo 384000 > /sys/devices/system/cpu/cpu2/cpufreq/scaling_min_freq
        echo 384000 > /sys/devices/system/cpu/cpu3/cpufreq/scaling_min_freq
        chown -h system /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
        chown -h system /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
        chown -h system /sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq
        chown -h system /sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq
        chown -h system /sys/devices/system/cpu/cpu2/cpufreq/scaling_max_freq
        chown -h system /sys/devices/system/cpu/cpu2/cpufreq/scaling_min_freq
        chown -h system /sys/devices/system/cpu/cpu3/cpufreq/scaling_max_freq
        chown -h system /sys/devices/system/cpu/cpu3/cpufreq/scaling_min_freq
        # Re-enable thermal core control now that limits are in place.
        echo 1 > /sys/module/msm_thermal/core_control/enabled
        chown -h root.system /sys/devices/system/cpu/mfreq
        chmod -h 220 /sys/devices/system/cpu/mfreq
        chown -h root.system /sys/devices/system/cpu/cpu1/online
        chown -h root.system /sys/devices/system/cpu/cpu2/online
        chown -h root.system /sys/devices/system/cpu/cpu3/online
        chmod -h 664 /sys/devices/system/cpu/cpu1/online
        chmod -h 664 /sys/devices/system/cpu/cpu2/online
        chmod -h 664 /sys/devices/system/cpu/cpu3/online
        # set DCVS parameters for CPU
        echo 40000 > /sys/module/msm_dcvs/cores/cpu0/slack_time_max_us
        echo 40000 > /sys/module/msm_dcvs/cores/cpu0/slack_time_min_us
        echo 100000 > /sys/module/msm_dcvs/cores/cpu0/em_win_size_min_us
        echo 500000 > /sys/module/msm_dcvs/cores/cpu0/em_win_size_max_us
        echo 0 > /sys/module/msm_dcvs/cores/cpu0/slack_mode_dynamic
        echo 1000000 > /sys/module/msm_dcvs/cores/cpu0/disable_pc_threshold
        echo 25000 > /sys/module/msm_dcvs/cores/cpu1/slack_time_max_us
        echo 25000 > /sys/module/msm_dcvs/cores/cpu1/slack_time_min_us
        echo 100000 > /sys/module/msm_dcvs/cores/cpu1/em_win_size_min_us
        echo 500000 > /sys/module/msm_dcvs/cores/cpu1/em_win_size_max_us
        echo 0 > /sys/module/msm_dcvs/cores/cpu1/slack_mode_dynamic
        echo 1000000 > /sys/module/msm_dcvs/cores/cpu1/disable_pc_threshold
        echo 25000 > /sys/module/msm_dcvs/cores/cpu2/slack_time_max_us
        echo 25000 > /sys/module/msm_dcvs/cores/cpu2/slack_time_min_us
        echo 100000 > /sys/module/msm_dcvs/cores/cpu2/em_win_size_min_us
        echo 500000 > /sys/module/msm_dcvs/cores/cpu2/em_win_size_max_us
        echo 0 > /sys/module/msm_dcvs/cores/cpu2/slack_mode_dynamic
        echo 1000000 > /sys/module/msm_dcvs/cores/cpu2/disable_pc_threshold
        echo 25000 > /sys/module/msm_dcvs/cores/cpu3/slack_time_max_us
        echo 25000 > /sys/module/msm_dcvs/cores/cpu3/slack_time_min_us
        echo 100000 > /sys/module/msm_dcvs/cores/cpu3/em_win_size_min_us
        echo 500000 > /sys/module/msm_dcvs/cores/cpu3/em_win_size_max_us
        echo 0 > /sys/module/msm_dcvs/cores/cpu3/slack_mode_dynamic
        echo 1000000 > /sys/module/msm_dcvs/cores/cpu3/disable_pc_threshold
        # set DCVS parameters for GPU
        echo 20000 > /sys/module/msm_dcvs/cores/gpu0/slack_time_max_us
        echo 20000 > /sys/module/msm_dcvs/cores/gpu0/slack_time_min_us
        echo 0 > /sys/module/msm_dcvs/cores/gpu0/slack_mode_dynamic
        # set msm_mpdecision parameters
        echo 45000 > /sys/module/msm_mpdecision/slack_time_max_us
        echo 15000 > /sys/module/msm_mpdecision/slack_time_min_us
        echo 100000 > /sys/module/msm_mpdecision/em_win_size_min_us
        echo 1000000 > /sys/module/msm_mpdecision/em_win_size_max_us
        echo 3 > /sys/module/msm_mpdecision/online_util_pct_min
        echo 25 > /sys/module/msm_mpdecision/online_util_pct_max
        echo 97 > /sys/module/msm_mpdecision/em_max_util_pct
        echo 2 > /sys/module/msm_mpdecision/rq_avg_poll_ms
        echo 10 > /sys/module/msm_mpdecision/mp_em_rounding_point_min
        echo 85 > /sys/module/msm_mpdecision/mp_em_rounding_point_max
        echo 50 > /sys/module/msm_mpdecision/iowait_threshold_pct
        #set permissions for the nodes needed by display on/off hook
        chown -h system /sys/module/msm_dcvs/cores/cpu0/slack_time_max_us
        chown -h system /sys/module/msm_dcvs/cores/cpu0/slack_time_min_us
        chown -h system /sys/module/msm_mpdecision/slack_time_max_us
        chown -h system /sys/module/msm_mpdecision/slack_time_min_us
        chmod -h 664 /sys/module/msm_dcvs/cores/cpu0/slack_time_max_us
        chmod -h 664 /sys/module/msm_dcvs/cores/cpu0/slack_time_min_us
        chmod -h 664 /sys/module/msm_mpdecision/slack_time_max_us
        chmod -h 664 /sys/module/msm_mpdecision/slack_time_min_us
        # Newer kernels expose the SoC id under /sys/devices/soc0.
        if [ -f /sys/devices/soc0/soc_id ]; then
            soc_id=`cat /sys/devices/soc0/soc_id`
        else
            soc_id=`cat /sys/devices/system/soc/soc0/id`
        fi
        case "$soc_id" in
            # soc_id 130: board-specific GPIO setup — export pins,
            # configure inputs with rising-edge interrupts and outputs
            # owned by the media UID, and keep vdd_dig/vdd_mem out of
            # low-power modes on this variant.
            "130")
                echo 230 > /sys/class/gpio/export
                echo 228 > /sys/class/gpio/export
                echo 229 > /sys/class/gpio/export
                echo "in" > /sys/class/gpio/gpio230/direction
                echo "rising" > /sys/class/gpio/gpio230/edge
                echo "in" > /sys/class/gpio/gpio228/direction
                echo "rising" > /sys/class/gpio/gpio228/edge
                echo "in" > /sys/class/gpio/gpio229/direction
                echo "rising" > /sys/class/gpio/gpio229/edge
                echo 253 > /sys/class/gpio/export
                echo 254 > /sys/class/gpio/export
                echo 257 > /sys/class/gpio/export
                echo 258 > /sys/class/gpio/export
                echo 259 > /sys/class/gpio/export
                echo "out" > /sys/class/gpio/gpio253/direction
                echo "out" > /sys/class/gpio/gpio254/direction
                echo "out" > /sys/class/gpio/gpio257/direction
                echo "out" > /sys/class/gpio/gpio258/direction
                echo "out" > /sys/class/gpio/gpio259/direction
                chown -h media /sys/class/gpio/gpio253/value
                chown -h media /sys/class/gpio/gpio254/value
                chown -h media /sys/class/gpio/gpio257/value
                chown -h media /sys/class/gpio/gpio258/value
                chown -h media /sys/class/gpio/gpio259/value
                chown -h media /sys/class/gpio/gpio253/direction
                chown -h media /sys/class/gpio/gpio254/direction
                chown -h media /sys/class/gpio/gpio257/direction
                chown -h media /sys/class/gpio/gpio258/direction
                chown -h media /sys/class/gpio/gpio259/direction
                echo 0 > /sys/module/rpm_resources/enable_low_power/vdd_dig
                echo 0 > /sys/module/rpm_resources/enable_low_power/vdd_mem
            ;;
        esac
    ;;
esac
# msm8974: enable PM low-power modes for all cores, then pick governor
# settings by SoC variant — interactive (with cpu_boost) on the newer
# v2/Pro ids, ondemand on everything else — and floor cores at 300 MHz.
case "$target" in
    "msm8974")
        echo 4 > /sys/module/lpm_levels/enable_low_power/l2
        # Full and standalone power collapse plus retention for all
        # cores, in suspend and in idle.
        echo 1 > /sys/module/msm_pm/modes/cpu0/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu2/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu3/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu0/power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu2/power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu3/power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu0/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu2/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu3/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu0/standalone_power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/standalone_power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu2/standalone_power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu3/standalone_power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu0/retention/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/retention/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu2/retention/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu3/retention/idle_enabled
        # Disable thermal core control while hotplugging; re-enabled
        # after the frequency floors are written below.
        echo 0 > /sys/module/msm_thermal/core_control/enabled
        echo 1 > /sys/devices/system/cpu/cpu1/online
        echo 1 > /sys/devices/system/cpu/cpu2/online
        echo 1 > /sys/devices/system/cpu/cpu3/online
        # Newer kernels expose the SoC id under /sys/devices/soc0.
        if [ -f /sys/devices/soc0/soc_id ]; then
            soc_id=`cat /sys/devices/soc0/soc_id`
        else
            soc_id=`cat /sys/devices/system/soc/soc0/id`
        fi
        case "$soc_id" in
            # These ids get the interactive governor, hwmon-driven CPU
            # bus-bandwidth devfreq governor, and cpu_boost input boost.
            "208" | "211" | "214" | "217" | "209" | "212" | "215" | "218" | "194" | "210" | "213" | "216")
                for devfreq_gov in /sys/class/devfreq/qcom,cpubw*/governor
                do
                    echo "cpubw_hwmon" > $devfreq_gov
                done
                echo "interactive" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
                echo "interactive" > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
                echo "interactive" > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
                echo "interactive" > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
                # Frequency-dependent ramp delays and target loads.
                echo "20000 1400000:40000 1700000:20000" > /sys/devices/system/cpu/cpufreq/interactive/above_hispeed_delay
                echo 90 > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
                echo 1190400 > /sys/devices/system/cpu/cpufreq/interactive/hispeed_freq
                echo 1 > /sys/devices/system/cpu/cpufreq/interactive/io_is_busy
                echo "85 1500000:90 1800000:70" > /sys/devices/system/cpu/cpufreq/interactive/target_loads
                echo 40000 > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
                echo 20 > /sys/module/cpu_boost/parameters/boost_ms
                echo 1728000 > /sys/module/cpu_boost/parameters/sync_threshold
                echo 100000 > /sys/devices/system/cpu/cpufreq/interactive/sampling_down_factor
                echo 1497600 > /sys/module/cpu_boost/parameters/input_boost_freq
                echo 40 > /sys/module/cpu_boost/parameters/input_boost_ms
                setprop ro.qualcomm.perf.cores_online 2
            ;;
            # All other 8974 variants: ondemand with multi-core tunables.
            *)
                echo "ondemand" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
                echo "ondemand" > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
                echo "ondemand" > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
                echo "ondemand" > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
                echo 50000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
                echo 90 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
                echo 1 > /sys/devices/system/cpu/cpufreq/ondemand/io_is_busy
                echo 2 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_down_factor
                echo 10 > /sys/devices/system/cpu/cpufreq/ondemand/down_differential
                echo 70 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold_multi_core
                echo 3 > /sys/devices/system/cpu/cpufreq/ondemand/down_differential_multi_core
                echo 960000 > /sys/devices/system/cpu/cpufreq/ondemand/optimal_freq
                echo 960000 > /sys/devices/system/cpu/cpufreq/ondemand/sync_freq
                echo 1190400 > /sys/devices/system/cpu/cpufreq/ondemand/input_boost
                echo 80 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold_any_cpu_load
            ;;
        esac
        # 300 MHz minimum frequency per core.
        echo 300000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
        echo 300000 > /sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq
        echo 300000 > /sys/devices/system/cpu/cpu2/cpufreq/scaling_min_freq
        echo 300000 > /sys/devices/system/cpu/cpu3/cpufreq/scaling_min_freq
        chown -h system /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
        chown -h system /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
        # Re-enable thermal core control and open up the hotplug nodes.
        echo 1 > /sys/module/msm_thermal/core_control/enabled
        chown -h root.system /sys/devices/system/cpu/mfreq
        chmod -h 220 /sys/devices/system/cpu/mfreq
        chown -h root.system /sys/devices/system/cpu/cpu1/online
        chown -h root.system /sys/devices/system/cpu/cpu2/online
        chown -h root.system /sys/devices/system/cpu/cpu3/online
        chmod -h 664 /sys/devices/system/cpu/cpu1/online
        chmod -h 664 /sys/devices/system/cpu/cpu2/online
        chmod -h 664 /sys/devices/system/cpu/cpu3/online
        # Notify the cpuctl cgroup on task migration for the apps group.
        echo 1 > /dev/cpuctl/apps/cpu.notify_on_migrate
    ;;
esac
# msm8916 family: per-SoC-id hotplug and networking setup — bring
# secondary cores online, steer rmnet RX via RPS, and start the touch
# service on specific 8939 board variants.
case "$target" in
    "msm8916")
        # Newer kernels expose the SoC id under /sys/devices/soc0.
        if [ -f /sys/devices/soc0/soc_id ]; then
            soc_id=`cat /sys/devices/soc0/soc_id`
        else
            soc_id=`cat /sys/devices/system/soc/soc0/id`
        fi
        case "$soc_id" in
            # 8916 variants: allow sleep (LPM) and online cores 1-3.
            "206" | "247" | "248" | "249" | "250")
                echo 0 > /sys/module/lpm_levels/parameters/sleep_disabled
                echo 1 > /sys/devices/system/cpu/cpu1/online
                echo 1 > /sys/devices/system/cpu/cpu2/online
                echo 1 > /sys/devices/system/cpu/cpu3/online
            ;;
            "239" | "241" | "263")
                # NOTE(review): $revision is read here but never used in
                # this block — possibly consumed later in the script.
                if [ -f /sys/devices/soc0/revision ]; then
                    revision=`cat /sys/devices/soc0/revision`
                else
                    revision=`cat /sys/devices/system/soc/soc0/revision`
                fi
                # RPS CPU mask (hex) for rmnet0 receive processing.
                echo 10 > /sys/class/net/rmnet0/queues/rx-0/rps_cpus
                if [ -f /sys/devices/soc0/platform_subtype_id ]; then
                    platform_subtype_id=`cat /sys/devices/soc0/platform_subtype_id`
                fi
                if [ -f /sys/devices/soc0/hw_platform ]; then
                    hw_platform=`cat /sys/devices/soc0/hw_platform`
                fi
                # On soc_id 239 Surf/MTP boards with specific subtypes,
                # start the "hbtp" init service (service definition lives
                # in the init rc files, not visible here).
                case "$soc_id" in
                    "239")
                        case "$hw_platform" in
                            "Surf")
                                case "$platform_subtype_id" in
                                    "1" | "2")
                                        start hbtp
                                    ;;
                                esac
                            ;;
                            "MTP")
                                case "$platform_subtype_id" in
                                    "3")
                                        start hbtp
                                    ;;
                                esac
                            ;;
                        esac
                    ;;
                esac
            ;;
            # These ids only get the rmnet0 RPS mask.
            "268" | "269" | "270" | "271")
                echo 10 > /sys/class/net/rmnet0/queues/rx-0/rps_cpus
            ;;
            # 8936 variants: just online cores 1-3.
            "233" | "240" | "242")
                echo 1 > /sys/devices/system/cpu/cpu1/online
                echo 1 > /sys/devices/system/cpu/cpu2/online
                echo 1 > /sys/devices/system/cpu/cpu3/online
            ;;
        esac
    ;;
esac
# msm8226: enable power collapse for all cores, online cores 1-3, tune
# the ondemand governor (set on CPU0) and floor CPU0 at 300 MHz.
case "$target" in
    "msm8226")
        echo 4 > /sys/module/lpm_levels/enable_low_power/l2
        # Full and standalone power collapse for all cores, in suspend
        # and in idle.
        echo 1 > /sys/module/msm_pm/modes/cpu0/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu2/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu3/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu0/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu2/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu3/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu0/standalone_power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/standalone_power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu2/standalone_power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu3/standalone_power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu0/power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu2/power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu3/power_collapse/idle_enabled
        echo 1 > /sys/devices/system/cpu/cpu1/online
        echo 1 > /sys/devices/system/cpu/cpu2/online
        echo 1 > /sys/devices/system/cpu/cpu3/online
        # ondemand tunables (governor written on CPU0 only here;
        # NOTE(review): the kernel may share the policy across cores —
        # confirm).
        echo "ondemand" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
        echo 50000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
        echo 90 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
        echo 1 > /sys/devices/system/cpu/cpufreq/ondemand/io_is_busy
        echo 2 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_down_factor
        echo 10 > /sys/devices/system/cpu/cpufreq/ondemand/down_differential
        echo 70 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold_multi_core
        echo 10 > /sys/devices/system/cpu/cpufreq/ondemand/down_differential_multi_core
        echo 787200 > /sys/devices/system/cpu/cpufreq/ondemand/optimal_freq
        echo 300000 > /sys/devices/system/cpu/cpufreq/ondemand/sync_freq
        echo 80 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold_any_cpu_load
        echo 300000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
        # Hand scaling limits to system, hotplug nodes to root:system.
        chown -h system /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
        chown -h system /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
        chown -h root.system /sys/devices/system/cpu/cpu1/online
        chown -h root.system /sys/devices/system/cpu/cpu2/online
        chown -h root.system /sys/devices/system/cpu/cpu3/online
        chmod -h 664 /sys/devices/system/cpu/cpu1/online
        chmod -h 664 /sys/devices/system/cpu/cpu2/online
        chmod -h 664 /sys/devices/system/cpu/cpu3/online
    ;;
esac
# msm8610: same power-collapse/ondemand setup as msm8226, plus a perf
# min-frequency property and the KSM deferred timer.
case "$target" in
    "msm8610")
        echo 4 > /sys/module/lpm_levels/enable_low_power/l2
        # Full and standalone power collapse for all cores, in suspend
        # and in idle.
        echo 1 > /sys/module/msm_pm/modes/cpu0/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu2/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu3/power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu0/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu2/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu3/standalone_power_collapse/suspend_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu0/standalone_power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/standalone_power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu2/standalone_power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu3/standalone_power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu0/power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu1/power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu2/power_collapse/idle_enabled
        echo 1 > /sys/module/msm_pm/modes/cpu3/power_collapse/idle_enabled
        echo 1 > /sys/devices/system/cpu/cpu1/online
        echo 1 > /sys/devices/system/cpu/cpu2/online
        echo 1 > /sys/devices/system/cpu/cpu3/online
        # ondemand governor tunables.
        echo "ondemand" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
        echo 50000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
        echo 90 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
        echo 1 > /sys/devices/system/cpu/cpufreq/ondemand/io_is_busy
        echo 2 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_down_factor
        echo 10 > /sys/devices/system/cpu/cpufreq/ondemand/down_differential
        echo 70 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold_multi_core
        echo 10 > /sys/devices/system/cpu/cpufreq/ondemand/down_differential_multi_core
        echo 787200 > /sys/devices/system/cpu/cpufreq/ondemand/optimal_freq
        echo 300000 > /sys/devices/system/cpu/cpufreq/ondemand/sync_freq
        echo 80 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold_any_cpu_load
        echo 300000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
        # NOTE(review): consumer of ro.qualcomm.perf.min_freq not
        # visible here — presumably a perf daemon; confirm.
        setprop ro.qualcomm.perf.min_freq 7
        # Enable KSM's deferred scan timer.
        echo 1 > /sys/kernel/mm/ksm/deferred_timer
        chown -h system /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
        chown -h system /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
        chown -h root.system /sys/devices/system/cpu/cpu1/online
        chown -h root.system /sys/devices/system/cpu/cpu2/online
        chown -h root.system /sys/devices/system/cpu/cpu3/online
        chmod -h 664 /sys/devices/system/cpu/cpu1/online
        chmod -h 664 /sys/devices/system/cpu/cpu2/online
        chmod -h 664 /sys/devices/system/cpu/cpu3/online
    ;;
esac
# ---------------------------------------------------------------------
# Per-SoC power/perf tuning for the msm8916 family.  $soc_id selects the
# variant: 8916 (206, 247-250), 8936 (233, 240, 242), 8939/8929 (239,
# 241, 263, 268-271).  Each variant brackets its cpufreq changes with a
# thermal core_control disable/enable so the thermal driver cannot
# hotplug cores while the governor is being reconfigured.
# ---------------------------------------------------------------------
case "$target" in
"msm8916")
# Newer kernels expose the SoC id at /sys/devices/soc0; fall back to the
# legacy path on older kernels.
if [ -f /sys/devices/soc0/soc_id ]; then
soc_id=`cat /sys/devices/soc0/soc_id`
else
soc_id=`cat /sys/devices/system/soc/soc0/id`
fi
#Enable adaptive LMK and set vmpressure_file_min
echo 1 > /sys/module/lowmemorykiller/parameters/enable_adaptive_lmk
echo 53059 > /sys/module/lowmemorykiller/parameters/vmpressure_file_min
# HMP scheduler settings for 8916, 8936, 8939, 8929
echo 3 > /proc/sys/kernel/sched_window_stats_policy
echo 3 > /proc/sys/kernel/sched_ravg_hist_size
# Apply governor settings for 8916
case "$soc_id" in
"206" | "247" | "248" | "249" | "250")
# HMP scheduler load tracking settings
echo 3 > /proc/sys/kernel/sched_ravg_hist_size
# HMP Task packing settings for 8916
echo 20 > /proc/sys/kernel/sched_small_task
echo 30 > /proc/sys/kernel/sched_mostly_idle_load
echo 3 > /proc/sys/kernel/sched_mostly_idle_nr_run
# disable thermal core_control to update scaling_min_freq
echo 0 > /sys/module/msm_thermal/core_control/enabled
echo 1 > /sys/devices/system/cpu/cpu0/online
echo "interactive" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo 800000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
# enable thermal core_control now
echo 1 > /sys/module/msm_thermal/core_control/enabled
# interactive governor tunables (global node: single policy/cluster)
echo "25000 1094400:50000" > /sys/devices/system/cpu/cpufreq/interactive/above_hispeed_delay
echo 90 > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
echo 30000 > /sys/devices/system/cpu/cpufreq/interactive/timer_rate
echo 998400 > /sys/devices/system/cpu/cpufreq/interactive/hispeed_freq
echo 0 > /sys/devices/system/cpu/cpufreq/interactive/io_is_busy
echo "1 800000:85 998400:90 1094400:80" > /sys/devices/system/cpu/cpufreq/interactive/target_loads
echo 50000 > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
echo 50000 > /sys/devices/system/cpu/cpufreq/interactive/sampling_down_factor
# Bring up all cores online
echo 1 > /sys/devices/system/cpu/cpu1/online
echo 1 > /sys/devices/system/cpu/cpu2/online
echo 1 > /sys/devices/system/cpu/cpu3/online
# NOTE(review): 8916 is quad-core; the cpu4 write below is presumably a
# harmless no-op kept for symmetry with 8936/8939 — confirm.
echo 1 > /sys/devices/system/cpu/cpu4/online
;;
esac
# Apply governor settings for 8936
case "$soc_id" in
"233" | "240" | "242")
# HMP scheduler load tracking settings
echo 3 > /proc/sys/kernel/sched_ravg_hist_size
# HMP Task packing settings for 8936
echo 50 > /proc/sys/kernel/sched_small_task
echo 50 > /proc/sys/kernel/sched_mostly_idle_load
echo 10 > /proc/sys/kernel/sched_mostly_idle_nr_run
# disable thermal core_control to update scaling_min_freq, interactive gov
echo 0 > /sys/module/msm_thermal/core_control/enabled
echo 1 > /sys/devices/system/cpu/cpu0/online
echo "interactive" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo 800000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
# enable thermal core_control now
echo 1 > /sys/module/msm_thermal/core_control/enabled
echo "25000 1113600:50000" > /sys/devices/system/cpu/cpufreq/interactive/above_hispeed_delay
echo 90 > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
echo 30000 > /sys/devices/system/cpu/cpufreq/interactive/timer_rate
echo 960000 > /sys/devices/system/cpu/cpufreq/interactive/hispeed_freq
echo 0 > /sys/devices/system/cpu/cpufreq/interactive/io_is_busy
echo "1 800000:85 1113600:90 1267200:80" > /sys/devices/system/cpu/cpufreq/interactive/target_loads
echo 50000 > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
echo 50000 > /sys/devices/system/cpu/cpufreq/interactive/sampling_down_factor
# Bring up all cores online
echo 1 > /sys/devices/system/cpu/cpu1/online
echo 1 > /sys/devices/system/cpu/cpu2/online
echo 1 > /sys/devices/system/cpu/cpu3/online
echo 1 > /sys/devices/system/cpu/cpu4/online
# Enable low power modes
echo 0 > /sys/module/lpm_levels/parameters/sleep_disabled
# GPU DDR bandwidth monitor: weight I/O activity at 40%
for gpu_bimc_io_percent in /sys/class/devfreq/qcom,gpubw*/bw_hwmon/io_percent
do
echo 40 > $gpu_bimc_io_percent
done
;;
esac
# Apply governor settings for 8939
case "$soc_id" in
"239" | "241" | "263" | "268" | "269" | "270" | "271")
# Pre-3.0 silicon takes different scheduler/governor values than 3.0.
if [ `cat /sys/devices/soc0/revision` != "3.0" ]; then
# Apply 1.0 and 2.0 specific Sched & Governor settings
# HMP scheduler load tracking settings
echo 5 > /proc/sys/kernel/sched_ravg_hist_size
# HMP Task packing settings for 8939, 8929
echo 20 > /proc/sys/kernel/sched_small_task
echo 30 > /proc/sys/kernel/sched_mostly_idle_load
echo 3 > /proc/sys/kernel/sched_mostly_idle_nr_run
# min-CPU bandwidth votes follow the cpufreq governor directly
for devfreq_gov in /sys/class/devfreq/qcom,mincpubw*/governor
do
echo "cpufreq" > $devfreq_gov
done
# CPU-to-DDR bandwidth scales from the hardware bandwidth monitor
for devfreq_gov in /sys/class/devfreq/qcom,cpubw*/governor
do
echo "bw_hwmon" > $devfreq_gov
for cpu_io_percent in /sys/class/devfreq/qcom,cpubw*/bw_hwmon/io_percent
do
echo 20 > $cpu_io_percent
done
done
for gpu_bimc_io_percent in /sys/class/devfreq/qcom,gpubw*/bw_hwmon/io_percent
do
echo 40 > $gpu_bimc_io_percent
done
# disable thermal core_control to update interactive gov settings
echo 0 > /sys/module/msm_thermal/core_control/enabled
# enable governor for perf cluster
echo 1 > /sys/devices/system/cpu/cpu0/online
echo "interactive" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo "20000 1113600:50000" > /sys/devices/system/cpu/cpu0/cpufreq/interactive/above_hispeed_delay
echo 85 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/go_hispeed_load
echo 20000 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/timer_rate
echo 1113600 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/hispeed_freq
echo 0 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/io_is_busy
echo "1 960000:85 1113600:90 1344000:80" > /sys/devices/system/cpu/cpu0/cpufreq/interactive/target_loads
echo 50000 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/min_sample_time
echo 50000 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/sampling_down_factor
echo 960000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
# enable governor for power cluster
echo 1 > /sys/devices/system/cpu/cpu4/online
echo "interactive" > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
echo "25000 800000:50000" > /sys/devices/system/cpu/cpu4/cpufreq/interactive/above_hispeed_delay
echo 90 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/go_hispeed_load
echo 40000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/timer_rate
echo 998400 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/hispeed_freq
echo 0 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/io_is_busy
echo "1 800000:90" > /sys/devices/system/cpu/cpu4/cpufreq/interactive/target_loads
echo 40000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/min_sample_time
echo 40000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/sampling_down_factor
echo 800000 > /sys/devices/system/cpu/cpu4/cpufreq/scaling_min_freq
# enable thermal core_control now
echo 1 > /sys/module/msm_thermal/core_control/enabled
# Bring up all cores online
echo 1 > /sys/devices/system/cpu/cpu1/online
echo 1 > /sys/devices/system/cpu/cpu2/online
echo 1 > /sys/devices/system/cpu/cpu3/online
echo 1 > /sys/devices/system/cpu/cpu4/online
echo 1 > /sys/devices/system/cpu/cpu5/online
echo 1 > /sys/devices/system/cpu/cpu6/online
echo 1 > /sys/devices/system/cpu/cpu7/online
# Enable low power modes
echo 0 > /sys/module/lpm_levels/parameters/sleep_disabled
# HMP scheduler (big.Little cluster related) settings
echo 75 > /proc/sys/kernel/sched_upmigrate
echo 60 > /proc/sys/kernel/sched_downmigrate
# cpu idle load threshold
echo 30 > /sys/devices/system/cpu/cpu0/sched_mostly_idle_load
echo 30 > /sys/devices/system/cpu/cpu1/sched_mostly_idle_load
echo 30 > /sys/devices/system/cpu/cpu2/sched_mostly_idle_load
echo 30 > /sys/devices/system/cpu/cpu3/sched_mostly_idle_load
echo 30 > /sys/devices/system/cpu/cpu4/sched_mostly_idle_load
echo 30 > /sys/devices/system/cpu/cpu5/sched_mostly_idle_load
echo 30 > /sys/devices/system/cpu/cpu6/sched_mostly_idle_load
echo 30 > /sys/devices/system/cpu/cpu7/sched_mostly_idle_load
else
# Apply 3.0 specific Sched & Governor settings
# HMP scheduler settings for 8939 V3.0
echo 3 > /proc/sys/kernel/sched_window_stats_policy
echo 3 > /proc/sys/kernel/sched_ravg_hist_size
echo 20000000 > /proc/sys/kernel/sched_ravg_window
# HMP Task packing settings for 8939 V3.0
echo 20 > /proc/sys/kernel/sched_small_task
echo 30 > /proc/sys/kernel/sched_mostly_idle_load
echo 3 > /proc/sys/kernel/sched_mostly_idle_nr_run
echo 0 > /sys/devices/system/cpu/cpu0/sched_prefer_idle
echo 0 > /sys/devices/system/cpu/cpu1/sched_prefer_idle
echo 0 > /sys/devices/system/cpu/cpu2/sched_prefer_idle
echo 0 > /sys/devices/system/cpu/cpu3/sched_prefer_idle
echo 0 > /sys/devices/system/cpu/cpu4/sched_prefer_idle
echo 0 > /sys/devices/system/cpu/cpu5/sched_prefer_idle
echo 0 > /sys/devices/system/cpu/cpu6/sched_prefer_idle
echo 0 > /sys/devices/system/cpu/cpu7/sched_prefer_idle
for devfreq_gov in /sys/class/devfreq/qcom,mincpubw*/governor
do
echo "cpufreq" > $devfreq_gov
done
for devfreq_gov in /sys/class/devfreq/qcom,cpubw*/governor
do
echo "bw_hwmon" > $devfreq_gov
for cpu_io_percent in /sys/class/devfreq/qcom,cpubw*/bw_hwmon/io_percent
do
echo 20 > $cpu_io_percent
done
done
for gpu_bimc_io_percent in /sys/class/devfreq/qcom,gpubw*/bw_hwmon/io_percent
do
echo 40 > $gpu_bimc_io_percent
done
# disable thermal core_control to update interactive gov settings
echo 0 > /sys/module/msm_thermal/core_control/enabled
# enable governor for perf cluster
echo 1 > /sys/devices/system/cpu/cpu0/online
echo "interactive" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo "19000 1113600:39000" > /sys/devices/system/cpu/cpu0/cpufreq/interactive/above_hispeed_delay
echo 85 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/go_hispeed_load
echo 20000 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/timer_rate
echo 1113600 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/hispeed_freq
echo 0 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/io_is_busy
echo "1 960000:85 1113600:90 1344000:80" > /sys/devices/system/cpu/cpu0/cpufreq/interactive/target_loads
echo 40000 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/min_sample_time
echo 40000 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/sampling_down_factor
echo 960000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
# enable governor for power cluster
echo 1 > /sys/devices/system/cpu/cpu4/online
echo "interactive" > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
echo 39000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/above_hispeed_delay
echo 90 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/go_hispeed_load
echo 20000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/timer_rate
echo 800000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/hispeed_freq
echo 0 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/io_is_busy
echo "1 800000:90" > /sys/devices/system/cpu/cpu4/cpufreq/interactive/target_loads
echo 40000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/min_sample_time
echo 40000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/sampling_down_factor
echo 800000 > /sys/devices/system/cpu/cpu4/cpufreq/scaling_min_freq
# enable thermal core_control now
echo 1 > /sys/module/msm_thermal/core_control/enabled
# Bring up all cores online
echo 1 > /sys/devices/system/cpu/cpu1/online
echo 1 > /sys/devices/system/cpu/cpu2/online
echo 1 > /sys/devices/system/cpu/cpu3/online
echo 1 > /sys/devices/system/cpu/cpu5/online
echo 1 > /sys/devices/system/cpu/cpu6/online
echo 1 > /sys/devices/system/cpu/cpu7/online
# Enable low power modes
echo 0 > /sys/module/lpm_levels/parameters/sleep_disabled
# HMP scheduler (big.Little cluster related) settings
echo 93 > /proc/sys/kernel/sched_upmigrate
echo 83 > /proc/sys/kernel/sched_downmigrate
# Enable sched guided freq control
echo 1 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/use_sched_load
echo 1 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/use_migration_notif
echo 1 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/use_sched_load
echo 1 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/use_migration_notif
echo 50000 > /proc/sys/kernel/sched_freq_inc_notify
echo 50000 > /proc/sys/kernel/sched_freq_dec_notify
# Enable core control
insmod /system/lib/modules/core_ctl.ko
echo 2 > /sys/devices/system/cpu/cpu0/core_ctl/min_cpus
echo 4 > /sys/devices/system/cpu/cpu0/core_ctl/max_cpus
echo 68 > /sys/devices/system/cpu/cpu0/core_ctl/busy_up_thres
echo 40 > /sys/devices/system/cpu/cpu0/core_ctl/busy_down_thres
echo 100 > /sys/devices/system/cpu/cpu0/core_ctl/offline_delay_ms
# NOTE(review): $revision is not assigned anywhere in this block; it is
# presumably set earlier in the script from /sys/devices/soc0/revision —
# confirm, otherwise this inner case never matches.
case "$revision" in
"3.0")
# Enable dynamic clock gating
echo 1 > /sys/module/lpm_levels/lpm_workarounds/dynamic_clock_gating
;;
esac
fi
;;
esac
;;
esac
# ---------------------------------------------------------------------
# Per-SoC tuning for msm8952: HMP task-packing, devfreq bandwidth
# governors, interactive cpufreq setup per cluster (cpu0 = perf,
# cpu4 = power), and the core_ctl hotplug module.  cpufreq changes are
# bracketed by a thermal core_control disable/enable.
# ---------------------------------------------------------------------
case "$target" in
"msm8952")
# HMP scheduler settings for 8952 soc id is 264
echo 3 > /proc/sys/kernel/sched_window_stats_policy
echo 3 > /proc/sys/kernel/sched_ravg_hist_size
# HMP Task packing settings for 8952
echo 20 > /proc/sys/kernel/sched_small_task
echo 30 > /sys/devices/system/cpu/cpu0/sched_mostly_idle_load
echo 30 > /sys/devices/system/cpu/cpu1/sched_mostly_idle_load
echo 30 > /sys/devices/system/cpu/cpu2/sched_mostly_idle_load
echo 30 > /sys/devices/system/cpu/cpu3/sched_mostly_idle_load
echo 30 > /sys/devices/system/cpu/cpu4/sched_mostly_idle_load
echo 30 > /sys/devices/system/cpu/cpu5/sched_mostly_idle_load
echo 30 > /sys/devices/system/cpu/cpu6/sched_mostly_idle_load
echo 30 > /sys/devices/system/cpu/cpu7/sched_mostly_idle_load
echo 3 > /sys/devices/system/cpu/cpu0/sched_mostly_idle_nr_run
echo 3 > /sys/devices/system/cpu/cpu1/sched_mostly_idle_nr_run
echo 3 > /sys/devices/system/cpu/cpu2/sched_mostly_idle_nr_run
echo 3 > /sys/devices/system/cpu/cpu3/sched_mostly_idle_nr_run
echo 3 > /sys/devices/system/cpu/cpu4/sched_mostly_idle_nr_run
echo 3 > /sys/devices/system/cpu/cpu5/sched_mostly_idle_nr_run
echo 3 > /sys/devices/system/cpu/cpu6/sched_mostly_idle_nr_run
echo 3 > /sys/devices/system/cpu/cpu7/sched_mostly_idle_nr_run
echo 0 > /sys/devices/system/cpu/cpu0/sched_prefer_idle
echo 0 > /sys/devices/system/cpu/cpu1/sched_prefer_idle
echo 0 > /sys/devices/system/cpu/cpu2/sched_prefer_idle
echo 0 > /sys/devices/system/cpu/cpu3/sched_prefer_idle
echo 0 > /sys/devices/system/cpu/cpu4/sched_prefer_idle
echo 0 > /sys/devices/system/cpu/cpu5/sched_prefer_idle
echo 0 > /sys/devices/system/cpu/cpu6/sched_prefer_idle
echo 0 > /sys/devices/system/cpu/cpu7/sched_prefer_idle
# CPU-to-DDR bandwidth votes come from the hardware bandwidth monitor
for devfreq_gov in /sys/class/devfreq/qcom,cpubw*/governor
do
echo "bw_hwmon" > $devfreq_gov
for cpu_io_percent in /sys/class/devfreq/qcom,cpubw*/bw_hwmon/io_percent
do
echo 20 > $cpu_io_percent
done
done
for gpu_bimc_io_percent in /sys/class/devfreq/qcom,gpubw*/bw_hwmon/io_percent
do
echo 40 > $gpu_bimc_io_percent
done
# disable thermal core_control to update interactive gov settings
echo 0 > /sys/module/msm_thermal/core_control/enabled
# enable governor for perf cluster
echo 1 > /sys/devices/system/cpu/cpu0/online
echo "interactive" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo "19000 1113600:39000" > /sys/devices/system/cpu/cpu0/cpufreq/interactive/above_hispeed_delay
echo 85 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/go_hispeed_load
echo 20000 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/timer_rate
echo 1113600 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/hispeed_freq
echo 0 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/io_is_busy
echo "1 960000:85 1113600:90 1344000:80" > /sys/devices/system/cpu/cpu0/cpufreq/interactive/target_loads
echo 40000 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/min_sample_time
echo 40000 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/sampling_down_factor
echo 960000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
# enable governor for power cluster
echo 1 > /sys/devices/system/cpu/cpu4/online
echo "interactive" > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
echo 39000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/above_hispeed_delay
echo 90 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/go_hispeed_load
echo 20000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/timer_rate
echo 800000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/hispeed_freq
echo 0 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/io_is_busy
echo "1 800000:90" > /sys/devices/system/cpu/cpu4/cpufreq/interactive/target_loads
echo 40000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/min_sample_time
echo 40000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/sampling_down_factor
echo 800000 > /sys/devices/system/cpu/cpu4/cpufreq/scaling_min_freq
# enable thermal core_control now
echo 1 > /sys/module/msm_thermal/core_control/enabled
# Bring up all cores online
echo 1 > /sys/devices/system/cpu/cpu1/online
echo 1 > /sys/devices/system/cpu/cpu2/online
echo 1 > /sys/devices/system/cpu/cpu3/online
echo 1 > /sys/devices/system/cpu/cpu5/online
echo 1 > /sys/devices/system/cpu/cpu6/online
echo 1 > /sys/devices/system/cpu/cpu7/online
# HMP scheduler (big.Little cluster related) settings
echo 93 > /proc/sys/kernel/sched_upmigrate
echo 70 > /proc/sys/kernel/sched_downmigrate
# Enable sched guided freq control
echo 1 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/use_sched_load
echo 1 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/use_migration_notif
echo 1 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/use_sched_load
echo 1 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/use_migration_notif
echo 50000 > /proc/sys/kernel/sched_freq_inc_notify
echo 50000 > /proc/sys/kernel/sched_freq_dec_notify
# Enable core control
insmod /system/lib/modules/core_ctl.ko
echo 2 > /sys/devices/system/cpu/cpu0/core_ctl/min_cpus
echo 4 > /sys/devices/system/cpu/cpu0/core_ctl/max_cpus
echo 68 > /sys/devices/system/cpu/cpu0/core_ctl/busy_up_thres
echo 40 > /sys/devices/system/cpu/cpu0/core_ctl/busy_down_thres
echo 100 > /sys/devices/system/cpu/cpu0/core_ctl/offline_delay_ms
;;
esac
# ---------------------------------------------------------------------
# Per-SoC tuning for apq8084: enable all idle low-power modes, bring
# the secondary cores up (with thermal core_control temporarily off),
# configure the interactive governor and cpu_boost, then hand selected
# sysfs nodes to the system user.
# ---------------------------------------------------------------------
case "$target" in
"apq8084")
echo 4 > /sys/module/lpm_levels/enable_low_power/l2
echo 1 > /sys/module/msm_pm/modes/cpu0/power_collapse/suspend_enabled
echo 1 > /sys/module/msm_pm/modes/cpu1/power_collapse/suspend_enabled
echo 1 > /sys/module/msm_pm/modes/cpu2/power_collapse/suspend_enabled
echo 1 > /sys/module/msm_pm/modes/cpu3/power_collapse/suspend_enabled
echo 1 > /sys/module/msm_pm/modes/cpu0/power_collapse/idle_enabled
echo 1 > /sys/module/msm_pm/modes/cpu1/power_collapse/idle_enabled
echo 1 > /sys/module/msm_pm/modes/cpu2/power_collapse/idle_enabled
echo 1 > /sys/module/msm_pm/modes/cpu3/power_collapse/idle_enabled
echo 1 > /sys/module/msm_pm/modes/cpu0/standalone_power_collapse/suspend_enabled
echo 1 > /sys/module/msm_pm/modes/cpu1/standalone_power_collapse/suspend_enabled
echo 1 > /sys/module/msm_pm/modes/cpu2/standalone_power_collapse/suspend_enabled
echo 1 > /sys/module/msm_pm/modes/cpu3/standalone_power_collapse/suspend_enabled
echo 1 > /sys/module/msm_pm/modes/cpu0/standalone_power_collapse/idle_enabled
echo 1 > /sys/module/msm_pm/modes/cpu1/standalone_power_collapse/idle_enabled
echo 1 > /sys/module/msm_pm/modes/cpu2/standalone_power_collapse/idle_enabled
echo 1 > /sys/module/msm_pm/modes/cpu3/standalone_power_collapse/idle_enabled
echo 1 > /sys/module/msm_pm/modes/cpu0/retention/idle_enabled
echo 1 > /sys/module/msm_pm/modes/cpu1/retention/idle_enabled
echo 1 > /sys/module/msm_pm/modes/cpu2/retention/idle_enabled
echo 1 > /sys/module/msm_pm/modes/cpu3/retention/idle_enabled
# keep thermal hotplug out of the way while cores are reconfigured
echo 0 > /sys/module/msm_thermal/core_control/enabled
echo 1 > /sys/devices/system/cpu/cpu1/online
echo 1 > /sys/devices/system/cpu/cpu2/online
echo 1 > /sys/devices/system/cpu/cpu3/online
for devfreq_gov in /sys/class/devfreq/qcom,cpubw*/governor
do
echo "cpubw_hwmon" > $devfreq_gov
done
echo "interactive" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo "interactive" > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
echo "interactive" > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
echo "interactive" > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
echo "20000 1400000:40000 1700000:20000" > /sys/devices/system/cpu/cpufreq/interactive/above_hispeed_delay
echo 90 > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
echo 1497600 > /sys/devices/system/cpu/cpufreq/interactive/hispeed_freq
echo "85 1500000:90 1800000:70" > /sys/devices/system/cpu/cpufreq/interactive/target_loads
echo 40000 > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
# cpu_boost: sync a waking CPU up to the fastest CPU, plus input boost
echo 20 > /sys/module/cpu_boost/parameters/boost_ms
echo 1728000 > /sys/module/cpu_boost/parameters/sync_threshold
echo 100000 > /sys/devices/system/cpu/cpufreq/interactive/sampling_down_factor
echo 1497600 > /sys/module/cpu_boost/parameters/input_boost_freq
echo 40 > /sys/module/cpu_boost/parameters/input_boost_ms
echo 1 > /dev/cpuctl/apps/cpu.notify_on_migrate
echo 300000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
echo 300000 > /sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq
echo 300000 > /sys/devices/system/cpu/cpu2/cpufreq/scaling_min_freq
echo 300000 > /sys/devices/system/cpu/cpu3/cpufreq/scaling_min_freq
echo 1 > /sys/module/msm_thermal/core_control/enabled
setprop ro.qualcomm.perf.cores_online 2
# allow the system user / perf HAL to adjust these nodes at runtime
chown -h system /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
chown -h system /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
chown -h root.system /sys/devices/system/cpu/mfreq
chmod -h 220 /sys/devices/system/cpu/mfreq
chown -h root.system /sys/devices/system/cpu/cpu1/online
chown -h root.system /sys/devices/system/cpu/cpu2/online
chown -h root.system /sys/devices/system/cpu/cpu3/online
chmod -h 664 /sys/devices/system/cpu/cpu1/online
chmod -h 664 /sys/devices/system/cpu/cpu2/online
chmod -h 664 /sys/devices/system/cpu/cpu3/online
;;
esac
# ---------------------------------------------------------------------
# Per-SoC tuning for mpq8092: enable every idle low-power mode on all
# four cores, bring the secondary cores online (thermal core_control is
# parked while we do so), switch everything to the ondemand governor,
# then expose selected sysfs nodes to the system user.
# ---------------------------------------------------------------------
case "$target" in
"mpq8092")
    # Allow the deepest L2 low-power mode.
    echo 4 > /sys/module/lpm_levels/enable_low_power/l2
    # Enable each idle/suspend low-power mode for cpu0..cpu3.  The
    # writes go one attribute at a time across all cores, matching the
    # original ordering.
    for lpm_attr in power_collapse/suspend_enabled \
                    power_collapse/idle_enabled \
                    standalone_power_collapse/suspend_enabled \
                    standalone_power_collapse/idle_enabled \
                    retention/idle_enabled; do
        for core in 0 1 2 3; do
            echo 1 > /sys/module/msm_pm/modes/cpu$core/$lpm_attr
        done
    done
    # Park thermal hotplug while cores/governors are reconfigured.
    echo 0 > /sys/module/msm_thermal/core_control/enabled
    for core in 1 2 3; do
        echo 1 > /sys/devices/system/cpu/cpu$core/online
    done
    # ondemand governor on every core, with shared tunables.
    for core in 0 1 2 3; do
        echo "ondemand" > /sys/devices/system/cpu/cpu$core/cpufreq/scaling_governor
    done
    echo 50000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
    echo 90 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
    echo 1 > /sys/devices/system/cpu/cpufreq/ondemand/io_is_busy
    for core in 0 1 2 3; do
        echo 300000 > /sys/devices/system/cpu/cpu$core/cpufreq/scaling_min_freq
    done
    # Reconfiguration done; let thermal manage cores again.
    echo 1 > /sys/module/msm_thermal/core_control/enabled
    # Hand runtime-tunable nodes over to the system user.
    chown -h system /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
    chown -h system /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
    chown -h root.system /sys/devices/system/cpu/mfreq
    chmod -h 220 /sys/devices/system/cpu/mfreq
    for core in 1 2 3; do
        chown -h root.system /sys/devices/system/cpu/cpu$core/online
    done
    for core in 1 2 3; do
        chmod -h 664 /sys/devices/system/cpu/cpu$core/online
    done
    ;;
esac
# ---------------------------------------------------------------------
# Per-SoC tuning for msm8994.  The BCL (battery current limiter) driver
# is put in "disable" mode, its hotplug mask is saved and cleared so it
# cannot offline the big cores while they are brought up, and the saved
# mask is restored at the end — the save/restore ordering here matters.
# ---------------------------------------------------------------------
case "$target" in
"msm8994")
echo 0 > /sys/module/msm_thermal/core_control/enabled
echo -n disable > /sys/devices/soc.*/qcom,bcl.*/mode
# remember the BCL hotplug mask so it can be restored after setup
bcl_hotplug_mask=`cat /sys/devices/soc.*/qcom,bcl.*/hotplug_mask`
echo 0 > /sys/devices/soc.*/qcom,bcl.*/hotplug_mask
echo -n enable > /sys/devices/soc.*/qcom,bcl.*/mode
echo 1 > /sys/devices/system/cpu/cpu4/online
echo 1 > /sys/devices/system/cpu/cpu5/online
echo 1 > /sys/devices/system/cpu/cpu6/online
echo 1 > /sys/devices/system/cpu/cpu7/online
echo 0 > /sys/module/lpm_levels/parameters/sleep_disabled
# configure governor settings for little cluster
echo "interactive" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo 1 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/use_sched_load
echo 0 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/use_migration_notif
echo "20000 750000:40000 800000:20000" > /sys/devices/system/cpu/cpu0/cpufreq/interactive/above_hispeed_delay
echo 90 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/go_hispeed_load
echo 20000 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/timer_rate
echo 768000 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/hispeed_freq
echo 0 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/io_is_busy
echo "85 780000:90" > /sys/devices/system/cpu/cpu0/cpufreq/interactive/target_loads
echo 40000 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/min_sample_time
echo 384000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
# configure governor settings for big cluster
echo "interactive" > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
echo 1 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/use_sched_load
echo 0 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/use_migration_notif
echo "20000 750000:40000 800000:20000" > /sys/devices/system/cpu/cpu4/cpufreq/interactive/above_hispeed_delay
echo 99 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/go_hispeed_load
echo 20000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/timer_rate
echo 768000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/hispeed_freq
echo 0 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/io_is_busy
echo "85 780000:90" > /sys/devices/system/cpu/cpu4/cpufreq/interactive/target_loads
echo 40000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/min_sample_time
echo 384000 > /sys/devices/system/cpu/cpu4/cpufreq/scaling_min_freq
echo 1 > /sys/module/msm_thermal/core_control/enabled
# restore the BCL hotplug mask saved above (mode must be "disable"
# while the mask is written)
echo -n disable > /sys/devices/soc.*/qcom,bcl.*/mode
echo $bcl_hotplug_mask > /sys/devices/soc.*/qcom,bcl.*/hotplug_mask
echo -n enable > /sys/devices/soc.*/qcom,bcl.*/mode
# Enable task migration fixups in the scheduler
echo 1 > /proc/sys/kernel/sched_migration_fixup
for devfreq_gov in /sys/class/devfreq/qcom,cpubw*/governor
do
echo "bw_hwmon" > $devfreq_gov
done
/system/bin/energy-awareness
#enable rps static configuration
echo 8 > /sys/class/net/rmnet_ipa0/queues/rx-0/rps_cpus
echo 30 > /proc/sys/kernel/sched_small_task
;;
esac
# ---------------------------------------------------------------------
# Per-SoC tuning for msm8909 (quad-core): adaptive LMK, HMP packing,
# interactive governor, core_ctl hotplug, and devfreq bandwidth-monitor
# tunables for the CPU and GPU BIMC paths.
# ---------------------------------------------------------------------
case "$target" in
"msm8909")
# Prefer the modern soc_id node; fall back to the legacy path.
if [ -f /sys/devices/soc0/soc_id ]; then
soc_id=`cat /sys/devices/soc0/soc_id`
else
soc_id=`cat /sys/devices/system/soc/soc0/id`
fi
#Enable adaptive LMK and set vmpressure_file_min
echo 1 > /sys/module/lowmemorykiller/parameters/enable_adaptive_lmk
echo 53059 > /sys/module/lowmemorykiller/parameters/vmpressure_file_min
# HMP scheduler settings for 8909 similiar to 8916
echo 3 > /proc/sys/kernel/sched_window_stats_policy
echo 3 > /proc/sys/kernel/sched_ravg_hist_size
# HMP Task packing settings for 8909 similiar to 8916
echo 30 > /proc/sys/kernel/sched_small_task
echo 50 > /proc/sys/kernel/sched_mostly_idle_load
echo 3 > /proc/sys/kernel/sched_mostly_idle_nr_run
# disable thermal core_control to update scaling_min_freq
echo 0 > /sys/module/msm_thermal/core_control/enabled
echo 1 > /sys/devices/system/cpu/cpu0/online
echo "interactive" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo 800000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
# enable thermal core_control now
echo 1 > /sys/module/msm_thermal/core_control/enabled
echo "30000 1094400:50000" > /sys/devices/system/cpu/cpufreq/interactive/above_hispeed_delay
echo 90 > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
echo 30000 > /sys/devices/system/cpu/cpufreq/interactive/timer_rate
echo 998400 > /sys/devices/system/cpu/cpufreq/interactive/hispeed_freq
echo 0 > /sys/devices/system/cpu/cpufreq/interactive/io_is_busy
echo "1 800000:85 998400:90 1094400:80" > /sys/devices/system/cpu/cpufreq/interactive/target_loads
echo 50000 > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
echo 50000 > /sys/devices/system/cpu/cpufreq/interactive/sampling_down_factor
# Bring up all cores online
echo 1 > /sys/devices/system/cpu/cpu1/online
echo 1 > /sys/devices/system/cpu/cpu2/online
echo 1 > /sys/devices/system/cpu/cpu3/online
echo 0 > /sys/module/lpm_levels/parameters/sleep_disabled
# Enable core control
insmod /system/lib/modules/core_ctl.ko
echo 2 > /sys/devices/system/cpu/cpu0/core_ctl/min_cpus
# NOTE(review): four space-separated thresholds — presumably one per
# online-core count; confirm against the core_ctl module.  Also note
# max_cpus is not written here, unlike the 8939/8952 paths.
echo 72 72 60 50 > /sys/devices/system/cpu/cpu0/core_ctl/busy_up_thres
echo 30 > /sys/devices/system/cpu/cpu0/core_ctl/busy_down_thres
echo 100 > /sys/devices/system/cpu/cpu0/core_ctl/offline_delay_ms
# Apply governor settings for 8909
for devfreq_gov in /sys/class/devfreq/qcom,cpubw*/governor
do
echo "bw_hwmon" > $devfreq_gov
for cpu_bimc_bw_step in /sys/class/devfreq/qcom,cpubw*/bw_hwmon/bw_step
do
echo 60 > $cpu_bimc_bw_step
done
for cpu_guard_band_mbps in /sys/class/devfreq/qcom,cpubw*/bw_hwmon/guard_band_mbps
do
echo 30 > $cpu_guard_band_mbps
done
done
for gpu_bimc_io_percent in /sys/class/devfreq/qcom,gpubw*/bw_hwmon/io_percent
do
echo 40 > $gpu_bimc_io_percent
done
for gpu_bimc_bw_step in /sys/class/devfreq/qcom,gpubw*/bw_hwmon/bw_step
do
echo 60 > $gpu_bimc_bw_step
done
for gpu_bimc_guard_band_mbps in /sys/class/devfreq/qcom,gpubw*/bw_hwmon/guard_band_mbps
do
echo 30 > $gpu_bimc_guard_band_mbps
done
;;
esac
# Slower ondemand sampling (25 ms) on the legacy msm7627 variants.
if [ "$target" = "msm7627_ffa" ] || [ "$target" = "msm7627_surf" ] || [ "$target" = "msm7627_6x" ]; then
    echo 25000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
fi
# 50 ms ondemand sampling on the QSD8x50 targets.
if [ "$target" = "qsd8250_surf" ] || [ "$target" = "qsd8250_ffa" ] || [ "$target" = "qsd8650a_st1x" ]; then
    echo 50000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
fi
# qsd8650a_st1x additionally needs debugfs mounted post-boot.
if [ "$target" = "qsd8650a_st1x" ]; then
    mount -t debugfs none /sys/kernel/debug
fi
# Hand the shared ondemand tunables to the system user so the perf HAL
# and frameworks can adjust them at runtime.
for knob in sampling_rate sampling_down_factor io_is_busy; do
    chown -h system /sys/devices/system/cpu/cpufreq/ondemand/$knob
done
# On eMMC-boot devices, expose the remote-storage sync nodes to the
# system user as well.
emmc_boot=$(getprop ro.boot.emmc)
if [ "$emmc_boot" = "true" ]; then
    for rs_dev in rs300000a7.65536 rs300100a7.65536; do
        chown -h system /sys/devices/platform/$rs_dev/force_sync
        chown -h system /sys/devices/platform/$rs_dev/sync_sts
    done
fi
# SD/MMC controller auto-suspend timeout: controller 3 on the 8x60-class
# targets, controller 1 on msm7627a.
if [ "$target" = "msm8960" ] || [ "$target" = "msm8660" ] || [ "$target" = "msm7630_surf" ]; then
    echo 10 > /sys/devices/platform/msm_sdcc.3/idle_timeout
elif [ "$target" = "msm7627a" ]; then
    echo 10 > /sys/devices/platform/msm_sdcc.1/idle_timeout
fi
# Post-setup services: launch the per-target hotplug/perf daemon and,
# where applicable, bump block-device readahead.
case "$target" in
msm8660|msm8960|msm8226|msm8610|mpq8092|msm8916)
    start mpdecision
    ;;
msm8909)
    start perfd
    ;;
msm8974)
    start mpdecision
    echo 512 > /sys/block/mmcblk0/bdi/read_ahead_kb
    ;;
msm8994)
    rm /data/system/default_values
    setprop ro.min_freq_0 384000
    setprop ro.min_freq_4 384000
    start perfd
    ;;
apq8084)
    rm /data/system/default_values
    start mpdecision
    # 512 KB readahead on the eMMC device and each of the sda..sdh disks.
    for blkdev in mmcblk0 sda sdb sdc sdd sde sdf sdg sdh; do
        echo 512 > /sys/block/$blkdev/bdi/read_ahead_kb
    done
    ;;
msm7627a)
    # mpdecision only runs on the dual-core soc_ids of this family.
    if [ -f /sys/devices/soc0/soc_id ]; then
        soc_id=`cat /sys/devices/soc0/soc_id`
    else
        soc_id=`cat /sys/devices/system/soc/soc0/id`
    fi
    case "$soc_id" in
    127|128|129)
        start mpdecision
        ;;
    esac
    ;;
esac
# Enable Power modes and set the CPU Freq Sampling rates
if [ "$target" = "msm7627a" ]; then
    start qosmgrd
    # Standalone power collapse on both cores, at idle then in suspend
    # (same write order as before).
    for core in 0 1; do
        echo 1 > /sys/module/pm2/modes/cpu$core/standalone_power_collapse/idle_enabled
    done
    for core in 0 1; do
        echo 1 > /sys/module/pm2/modes/cpu$core/standalone_power_collapse/suspend_enabled
    done
    # Full power collapse is only toggled on cpu0.
    echo 1 > /sys/module/pm2/modes/cpu0/power_collapse/suspend_enabled
    echo 1 > /sys/module/pm2/modes/cpu0/power_collapse/idle_enabled
    echo 25000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
fi
# Change adj level and min_free_kbytes setting for lowmemory killer to kick in
case "$target" in
"msm7627a")
echo 0,1,2,4,9,12 > /sys/module/lowmemorykiller/parameters/adj
echo 5120 > /proc/sys/vm/min_free_kbytes
;;
esac
# Install AdrenoTest.apk if not already installed
if [ -f /data/prebuilt/AdrenoTest.apk ]; then
if [ ! -d /data/data/com.qualcomm.adrenotest ]; then
pm install /data/prebuilt/AdrenoTest.apk
fi
fi
# Install SWE_Browser.apk if not already installed
if [ -f /data/prebuilt/SWE_AndroidBrowser.apk ]; then
if [ ! -d /data/data/com.android.swe.browser ]; then
pm install /data/prebuilt/SWE_AndroidBrowser.apk
fi
fi
# Change adj level and min_free_kbytes setting for lowmemory killer to kick in
case "$target" in
"msm8660")
start qosmgrd
echo 0,1,2,4,9,12 > /sys/module/lowmemorykiller/parameters/adj
echo 5120 > /proc/sys/vm/min_free_kbytes
;;
esac
case "$target" in
"msm8226" | "msm8974" | "msm8610" | "apq8084" | "mpq8092" | "msm8610" | "msm8916" | "msm8994")
# Let kernel know our image version/variant/crm_version
image_version="10:"
image_version+=`getprop ro.build.id`
image_version+=":"
image_version+=`getprop ro.build.version.incremental`
image_variant=`getprop ro.product.name`
image_variant+="-"
image_variant+=`getprop ro.build.type`
oem_version=`getprop ro.build.version.codename`
echo 10 > /sys/devices/soc0/select_image
echo $image_version > /sys/devices/soc0/image_version
echo $image_variant > /sys/devices/soc0/image_variant
echo $oem_version > /sys/devices/soc0/image_crm_version
;;
esac
#Set per_process_reclaim tuning parameters
echo 50 > /sys/module/process_reclaim/parameters/pr_pressure_min
echo 70 > /sys/module/process_reclaim/parameters/pr_pressure_max
echo 512 > /sys/module/process_reclaim/parameters/per_swap_size
echo 30 > /sys/module/process_reclaim/parameters/swap_opt_eff
|
DestructoSphere/android_device_huawei_y6
|
rootdir/etc/init.qcom.post_boot.sh
|
Shell
|
gpl-3.0
| 71,185 |
#!/usr/bin/bash --
# Helper script for using an external diff program with Clearcase
# Specifically: Windows clearcase, Dynamic Views
# Adapted from stackoverflow/snip2code
#
# Intended usage: from parent dir of file to diff, type: ./ctDiffHelper.sh file.cpp
# Used with alias: ctdiff file.cpp (alias: ctdiff='~/bin/ctDiffHelper.sh')
#
# With one argument, diffs the file against its ClearCase predecessor
# version; with two arguments, diffs them against each other.
my_ccview=$(cleartool pwv -short)
echo "Set view: $my_ccview"
fileA=""
fileB=""
# NOTE: on windows/cygwin, may need to add symbolic link for 'kdiff3-qt'.
my_difftool="/usr/bin/kdiff3"
# check clearcase view
if [ "$my_ccview" == "** NONE **" ] ; then
echo "Error: ClearCase view not set, aborted."
# Use a conventional failure status; the original 'exit -1' is
# non-portable and wraps to 255.
exit 1
fi
if [ "$1" == "" ] ; then
echo "Error: missing 1st file argument!"
echo " (Expects linux-style file path)"
echo "Eg: `basename $0` foo.txt -> This will diff foo.txt with its previous version"
echo "Eg: `basename $0` foo.txt baz.txt -> This will diff foo.txt and baz.txt"
exit 1
fi
if [ "$2" == "" ] ; then
# No 2nd file passed, so find previous version
fileB="$1"
fileA="${fileB}@@$(cleartool descr -pred -short "$fileB")"
# Convert backslashes in the ClearCase extended path to forward slashes.
# (Inside the original backticks, 's^\\\\^/^g' reached sed as 's^\\^/^g';
# with $(...) no extra backslash stripping happens, so write it directly.)
fileA=$(echo "$fileA" | sed 's^\\^/^g')
else
fileA="$1"
fileB="$2"
fi
echo "File A: $fileA"
echo "File B: $fileB"
# Quote both operands: ClearCase extended names contain '@@' and, on
# Windows views, may contain spaces.
"${my_difftool}" "${fileA}" "${fileB}"
|
tniles/refBin
|
ctDiffHelper.sh
|
Shell
|
gpl-3.0
| 1,247 |
# GPIO tuning environment for the QCA4010 aj_svclite build.
# Each qtune_* variable is consumed by the tunable build tooling; values are
# raw 32-bit configuration words.
# NOTE(review): the exact bit semantics are defined by the QCA4010 tunable
# tooling, not visible here -- consult the target tool docs before editing.
#
# ACTIVE_CONFIG: per-GPIO pad configuration used while the system is active.
export qtune_GPIO0_ACTIVE_CONFIG=0x80000000
export qtune_GPIO1_ACTIVE_CONFIG=0x90000800
export qtune_GPIO2_ACTIVE_CONFIG=0x90000800
export qtune_GPIO3_ACTIVE_CONFIG=0x90000800
export qtune_GPIO4_ACTIVE_CONFIG=0x80000000
export qtune_GPIO5_ACTIVE_CONFIG=0x80000000
export qtune_GPIO6_ACTIVE_CONFIG=0x80000800
export qtune_GPIO7_ACTIVE_CONFIG=0x80000800
export qtune_GPIO8_ACTIVE_CONFIG=0xd0000048
export qtune_GPIO9_ACTIVE_CONFIG=0x80000800
export qtune_GPIO10_ACTIVE_CONFIG=0x80000000
export qtune_GPIO11_ACTIVE_CONFIG=0x80000000
export qtune_GPIO12_ACTIVE_CONFIG=0x80000000
export qtune_GPIO13_ACTIVE_CONFIG=0x80000000
export qtune_GPIO14_ACTIVE_CONFIG=0x80000800
export qtune_GPIO15_ACTIVE_CONFIG=0x80000800
export qtune_GPIO16_ACTIVE_CONFIG=0x80000800
export qtune_GPIO17_ACTIVE_CONFIG=0xa0000800
export qtune_GPIO18_ACTIVE_CONFIG=0x80000800
export qtune_GPIO19_ACTIVE_CONFIG=0x80000800
export qtune_GPIO20_ACTIVE_CONFIG=0x80000800
export qtune_GPIO21_ACTIVE_CONFIG=0x80000800
export qtune_GPIO22_ACTIVE_CONFIG=0x80000800
export qtune_GPIO23_ACTIVE_CONFIG=0x80000800
export qtune_GPIO24_ACTIVE_CONFIG=0x80000800
export qtune_GPIO25_ACTIVE_CONFIG=0x80000800
export qtune_GPIO26_ACTIVE_CONFIG=0x80000800
export qtune_GPIO27_ACTIVE_CONFIG=0x80000800
export qtune_GPIO28_ACTIVE_CONFIG=0xd0002808
export qtune_GPIO29_ACTIVE_CONFIG=0xd0002808
export qtune_GPIO30_ACTIVE_CONFIG=0xa0000800
export qtune_GPIO31_ACTIVE_CONFIG=0x80000800
export qtune_GPIO32_ACTIVE_CONFIG=0x80000800
export qtune_GPIO33_ACTIVE_CONFIG=0x80000800
export qtune_GPIO34_ACTIVE_CONFIG=0xd0000808
export qtune_GPIO35_ACTIVE_CONFIG=0xd0000808
export qtune_GPIO36_ACTIVE_CONFIG=0xd0000808
export qtune_GPIO37_ACTIVE_CONFIG=0xd0000808
export qtune_GPIO38_ACTIVE_CONFIG=0xd0000808
export qtune_GPIO39_ACTIVE_CONFIG=0xd0000808
export qtune_GPIO40_ACTIVE_CONFIG=0xd0000808
# INACTIVE_CONFIG: per-GPIO pad configuration used in the inactive/sleep state.
export qtune_GPIO0_INACTIVE_CONFIG=0xd000000d
export qtune_GPIO1_INACTIVE_CONFIG=0xd000000d
export qtune_GPIO2_INACTIVE_CONFIG=0xc0000008
export qtune_GPIO3_INACTIVE_CONFIG=0xd000000d
export qtune_GPIO4_INACTIVE_CONFIG=0xd0000008
export qtune_GPIO5_INACTIVE_CONFIG=0xd000000d
export qtune_GPIO6_INACTIVE_CONFIG=0x80001008
export qtune_GPIO7_INACTIVE_CONFIG=0xa0000008
export qtune_GPIO8_INACTIVE_CONFIG=0x90000808
export qtune_GPIO9_INACTIVE_CONFIG=0x90000808
export qtune_GPIO10_INACTIVE_CONFIG=0x80000808
export qtune_GPIO11_INACTIVE_CONFIG=0x90000028
export qtune_GPIO12_INACTIVE_CONFIG=0x80000808
export qtune_GPIO13_INACTIVE_CONFIG=0x80000808
export qtune_GPIO14_INACTIVE_CONFIG=0x90000000
export qtune_GPIO15_INACTIVE_CONFIG=0xb0000028
export qtune_GPIO16_INACTIVE_CONFIG=0xa0000028
export qtune_GPIO17_INACTIVE_CONFIG=0xa0000028
export qtune_GPIO18_INACTIVE_CONFIG=0x90000008
export qtune_GPIO19_INACTIVE_CONFIG=0xc0000028
export qtune_GPIO20_INACTIVE_CONFIG=0xa0000028
export qtune_GPIO21_INACTIVE_CONFIG=0x90000008
export qtune_GPIO22_INACTIVE_CONFIG=0x90000008
export qtune_GPIO23_INACTIVE_CONFIG=0x90000008
export qtune_GPIO24_INACTIVE_CONFIG=0x90000008
export qtune_GPIO25_INACTIVE_CONFIG=0x90007808
export qtune_GPIO26_INACTIVE_CONFIG=0x90000008
export qtune_GPIO27_INACTIVE_CONFIG=0x90000008
export qtune_GPIO28_INACTIVE_CONFIG=0x90000008
export qtune_GPIO29_INACTIVE_CONFIG=0x90000008
export qtune_GPIO30_INACTIVE_CONFIG=0x90000008
export qtune_GPIO31_INACTIVE_CONFIG=0x90000008
export qtune_GPIO32_INACTIVE_CONFIG=0x90000008
export qtune_GPIO33_INACTIVE_CONFIG=0x90000008
export qtune_GPIO34_INACTIVE_CONFIG=0xd0000808
export qtune_GPIO35_INACTIVE_CONFIG=0xd0000808
export qtune_GPIO36_INACTIVE_CONFIG=0xd0000808
export qtune_GPIO37_INACTIVE_CONFIG=0xd0000808
export qtune_GPIO38_INACTIVE_CONFIG=0xd0000808
export qtune_GPIO39_INACTIVE_CONFIG=0xd0000808
export qtune_GPIO40_INACTIVE_CONFIG=0xd0000808
# PERIPHERAL_ID: owning peripheral for each GPIO
# (0xb for GPIO0-GPIO33, 0xe for GPIO34-GPIO40).
export qtune_GPIO0_PERIPHERAL_ID=0xb
export qtune_GPIO1_PERIPHERAL_ID=0xb
export qtune_GPIO2_PERIPHERAL_ID=0xb
export qtune_GPIO3_PERIPHERAL_ID=0xb
export qtune_GPIO4_PERIPHERAL_ID=0xb
export qtune_GPIO5_PERIPHERAL_ID=0xb
export qtune_GPIO6_PERIPHERAL_ID=0xb
export qtune_GPIO7_PERIPHERAL_ID=0xb
export qtune_GPIO8_PERIPHERAL_ID=0xb
export qtune_GPIO9_PERIPHERAL_ID=0xb
export qtune_GPIO10_PERIPHERAL_ID=0xb
export qtune_GPIO11_PERIPHERAL_ID=0xb
export qtune_GPIO12_PERIPHERAL_ID=0xb
export qtune_GPIO13_PERIPHERAL_ID=0xb
export qtune_GPIO14_PERIPHERAL_ID=0xb
export qtune_GPIO15_PERIPHERAL_ID=0xb
export qtune_GPIO16_PERIPHERAL_ID=0xb
export qtune_GPIO17_PERIPHERAL_ID=0xb
export qtune_GPIO18_PERIPHERAL_ID=0xb
export qtune_GPIO19_PERIPHERAL_ID=0xb
export qtune_GPIO20_PERIPHERAL_ID=0xb
export qtune_GPIO21_PERIPHERAL_ID=0xb
export qtune_GPIO22_PERIPHERAL_ID=0xb
export qtune_GPIO23_PERIPHERAL_ID=0xb
export qtune_GPIO24_PERIPHERAL_ID=0xb
export qtune_GPIO25_PERIPHERAL_ID=0xb
export qtune_GPIO26_PERIPHERAL_ID=0xb
export qtune_GPIO27_PERIPHERAL_ID=0xb
export qtune_GPIO28_PERIPHERAL_ID=0xb
export qtune_GPIO29_PERIPHERAL_ID=0xb
export qtune_GPIO30_PERIPHERAL_ID=0xb
export qtune_GPIO31_PERIPHERAL_ID=0xb
export qtune_GPIO32_PERIPHERAL_ID=0xb
export qtune_GPIO33_PERIPHERAL_ID=0xb
export qtune_GPIO34_PERIPHERAL_ID=0xe
export qtune_GPIO35_PERIPHERAL_ID=0xe
export qtune_GPIO36_PERIPHERAL_ID=0xe
export qtune_GPIO37_PERIPHERAL_ID=0xe
export qtune_GPIO38_PERIPHERAL_ID=0xe
export qtune_GPIO39_PERIPHERAL_ID=0xe
export qtune_GPIO40_PERIPHERAL_ID=0xe
# Output image produced by the application build.
export APPS_OUTFILE="../../image/aj_svclite.out"
|
dengcj0/QCA4010
|
target/tool/tunable/tuneenv.sh
|
Shell
|
gpl-3.0
| 5,349 |
#!/bin/bash
# Crack PKCS12 file using a dictionary attack
#
# Usage: crackpkcs12.sh <dictionary-file> <pkcs12-file>
# Tries each line of the dictionary as the import password and prints the
# first one that openssl accepts.
DICCIONARIO=$1
ARCHIVO=$2
# Fail early when either input file is missing.
if [ ! -f "$DICCIONARIO" ] || [ ! -f "$ARCHIVO" ]; then
echo "Usage: $0 <dictionary-file> <pkcs12-file>" >&2
exit 1
fi
COUNT=0
# Read the dictionary one line per candidate. The original
# 'for PSW in $(cat $DICCIONARIO)' word-split (and glob-expanded) the file,
# so passwords containing spaces or '*' were never tried as written.
while IFS= read -r PSW
do
COUNT=$((COUNT+1))
# Progress heartbeat every 1000 attempts.
if [ $((COUNT % 1000)) -eq 0 ]; then date; echo Intento $COUNT; fi
if openssl pkcs12 -noout -passin "pass:${PSW}" -in "${ARCHIVO}" 2> /dev/null
then
echo
echo Password found: $PSW
exit
fi
done < "$DICCIONARIO"
|
crackpkcs12/crackpkcs12
|
misc/sh/crackpkcs12.sh
|
Shell
|
gpl-3.0
| 344 |
#! /bin/bash
# Publishes fake boiler/heating status updates to an IoT Manager MQTT broker,
# alternating between a "stopped" and a "running" UI state every 30 seconds.
# NOTE(review): credentials are hardcoded placeholders -- replace before use.
USER=user
PASSWORD=password
# Toggle forever between the two states.
while true
do
# State 1: heating stopped -- light5 shows OFF (calm/green styling),
# light6 button disabled and greyed out.
mosquitto_pub -u ${USER} -P ${PASSWORD} -t /IoTmanager/dev-fake2/light5/status -m "{ \"status\" : \"OFF\", \"class3\" : \"calm-bg light padding-left padding-right rounded\" }"
mosquitto_pub -u ${USER} -P ${PASSWORD} -t /IoTmanager/dev-fake2/light6/status -m "{\"descr\":\"Heating already stopped\",\"class2\":\"calm\",\"class3\":\"button icon ion-close-circled\",\"widgetConfig\":{\"fill\":\"#AAAAAA\",\"fillPressed\":\"#EEEEEE\",\"disabled\": 1}}"
# Report the boiler reading offset upward by 20.
# NOTE(review): assumes boiler.txt exists in the CWD and contains an
# integer -- verify before deployment.
BOILER=$(cat boiler.txt)
let "BOILER = BOILER + 20"
mosquitto_pub -u ${USER} -P ${PASSWORD} -t /IoTmanager/dev-fake2/light4/status -m "{\"status\":\"${BOILER}\"}"
sleep 30
# State 2: heating running -- light5 shows ON (assertive/red styling),
# light6 becomes an enabled emergency-stop button.
mosquitto_pub -u ${USER} -P ${PASSWORD} -t /IoTmanager/dev-fake2/light5/status -m "{ \"status\" : \"ON\", \"class3\" : \"assertive-bg light padding-left padding-right rounded\" }"
mosquitto_pub -u ${USER} -P ${PASSWORD} -t /IoTmanager/dev-fake2/light6/status -m "{ \"descr\" : \"Emergency Stop heating\",\"class2\" : \"assertive\",\"class3\" : \"button icon ion-checkmark-circled\",\"widgetConfig\" : { \"fill\" : \"#FF5050\",\"fillPressed\" : \"#FF7070\",\"disabled\": 0 }}"
# Report the reading offset downward by 20.
BOILER=$(cat boiler.txt)
let "BOILER = BOILER - 20"
mosquitto_pub -u ${USER} -P ${PASSWORD} -t /IoTmanager/dev-fake2/light4/status -m "{\"status\":\"${BOILER}\"}"
sleep 30
done
|
bigjohnson/RaspyIot
|
fakeboiler.sh
|
Shell
|
gpl-3.0
| 1,349 |
#!/bin/bash
# Install the PPPoEDI plugin system-wide via its Python 3 setuptools script.
# The commented lines preserve the original flow of unpacking a release
# tarball before installing.
#tar xvzf PPPoEDI*
#cd PPPoEDI*
python3 setup.py install
|
LAR-UFES/pppoe-plugin
|
install.sh
|
Shell
|
gpl-3.0
| 69 |
#
# A desktop-oriented virtual machines management system written in Shell.
#
# Code is available online at https://github.com/magenete/cuckoo
# See LICENSE for licensing information, and README for details.
#
# Copyright (C) 2016 Magenete Systems OÜ.
#
# Options definition
#######################################
# Parse the program's command line and populate the CUCKOO_* /
# CUCKOO_EMULATOR_* globals that drive the rest of the tool.
# Globals:   CUCKOO_ACTION, CUCKOO_EMULATOR_ACTION and many other CUCKOO_*
#            settings (written); *_LIST / *_DEFAULT constants (read);
#            helpers cuckoo_error, cuckoo_help, cuckoo_variables,
#            valid_value_in_arr, cuckoo_dist_version_value_check (called).
# Arguments: "$@" -- the raw command line.
# Outputs:   diagnostics via cuckoo_error; usage via cuckoo_help.
# Returns:   0 on success; exits via cuckoo_error, --version or --help.
# NOTE: requires GNU getopt (util-linux) for long-option support (-l/-a);
#       BSD getopt will not parse this command line.
#######################################
cuckoo_args()
{
ARGS_SHORT="s:irbIV:qd:p:e:lxD:P:E:LXWUZw:zQB:A:O:a:o:v:m:K:T:S:C:c:f:M:FNt:R:h"
ARGS_LONG="setup:,install,run,qemu-build,qemu-list,qemu-version:,qemu-delete,iso-download:,iso-import:,iso-export:,iso-list,iso-delete,hd-download:,hd-import:,hd-export:,hd-list,hd-delete,config-create,config-update,config-delete,desktop-create:,desktop-delete,qemu-system,qemu-branch:,qemu-arch:,qemu-os-name:,arch:,os-name:,dist-version:,memory-size:,cpu-cores:,cpu-threads:,cpu-sockets:,cdrom-add:,cdrom-boot:,floppy-boot:,smb-dir:,full-screen,no-daemonize,hd-type:,opts-add:,version,help"
# $? below still reflects getopt's status: a plain assignment preserves
# the exit status of its command substitution.
OPTS="$(getopt -o "${ARGS_SHORT}" -l "${ARGS_LONG}" -a -- "$@" 2>/dev/null)"
if [ $? -gt 0 ]
then
cuckoo_error "Invalid option(s) value"
fi
eval set -- "$OPTS"
# Options parsing
while [ $# -gt 0 ]
do
case $1 in
-- )
shift 1
;;
# Actions
--setup | -s )
CUCKOO_ACTION="setup"
if [ -d "$2" ]
then
CUCKOO_SETUP_DIR="${2}/"
CUCKOO_EMULATOR_SETUP_DIR="${CUCKOO_SETUP_DIR}${CUCKOO}/"
CUCKOO_EMULATOR_ACTION="setup"
else
cuckoo_error "Directory '${2}' does not exist, so it can not be used for setup"
fi
shift 2
;;
--install | -i )
CUCKOO_ACTION="install"
shift 1
;;
--run | -r )
CUCKOO_ACTION="run"
shift 1
;;
--qemu-build | -b )
CUCKOO_EMULATOR_ACTION="build"
shift 1
;;
--qemu-list | -I )
CUCKOO_EMULATOR_ACTION="list"
shift 1
;;
--qemu-version | -V )
CUCKOO_EMULATOR_ACTION="version"
CUCKOO_EMULATOR_VERSION="$2"
shift 2
;;
--qemu-delete | -q )
CUCKOO_EMULATOR_ACTION="delete"
shift 1
;;
--iso-download | -d )
CUCKOO_ACTION="iso-setup"
CUCKOO_ISO_FILE_PATH="$2"
CUCKOO_ISO_FILE_NET="yes"
shift 2
;;
--iso-import | -p )
CUCKOO_ACTION="iso-setup"
if [ -f "$2" ]
then
CUCKOO_ISO_FILE_PATH="$2"
CUCKOO_ISO_FILE_NET=""
else
cuckoo_error "ISO file '${2}' does not exist"
fi
shift 2
;;
--iso-export | -e )
CUCKOO_ACTION="iso-export"
if [ -d "$2" ]
then
CUCKOO_ISO_FILE_PATH="${2}/"
CUCKOO_ISO_FILE_NET=""
else
cuckoo_error "Directory '${2}' does not exist, so it can not be used for export"
fi
shift 2
;;
--iso-list | -l )
CUCKOO_ACTION="iso-list"
shift 1
;;
--iso-delete | -x )
CUCKOO_ACTION="iso-delete"
shift 1
;;
--hd-download | -D )
CUCKOO_ACTION="hd-setup"
CUCKOO_HD_FILE_PATH="$2"
CUCKOO_HD_FILE_NET="yes"
shift 2
;;
--hd-import | -P )
CUCKOO_ACTION="hd-setup"
if [ -f "$2" ] || [ -d "$2" ]
then
CUCKOO_HD_FILE_PATH="$2"
CUCKOO_HD_FILE_NET=""
else
cuckoo_error "HD directory or file '${2}' does not exist"
fi
shift 2
;;
--hd-export | -E )
CUCKOO_ACTION="hd-export"
if [ -d "$2" ]
then
CUCKOO_HD_FILE_PATH="${2}/"
CUCKOO_HD_FILE_NET=""
else
cuckoo_error "Directory '${2}' does not exist, so it can not be used for export"
fi
shift 2
;;
--hd-list | -L )
CUCKOO_ACTION="hd-list"
shift 1
;;
--hd-delete | -X )
CUCKOO_ACTION="hd-delete"
shift 1
;;
--config-create | -W )
CUCKOO_ACTION="config"
CUCKOO_DIST_VERSION_CONFIG="create"
shift 1
;;
--config-update | -U )
CUCKOO_ACTION="config"
CUCKOO_DIST_VERSION_CONFIG="update"
shift 1
;;
--config-delete | -Z )
CUCKOO_ACTION="config"
CUCKOO_DIST_VERSION_CONFIG="delete"
shift 1
;;
--desktop-create | -w )
CUCKOO_ACTION="desktop"
CUCKOO_DIST_VERSION_DESKTOP="create"
CUCKOO_DIST_VERSION_DESKTOP_STYLE="$2"
if [ -z "$(valid_value_in_arr "$CUCKOO_DIST_VERSION_DESKTOP_STYLE_LIST" "$2")" ]
then
cuckoo_error "Desktop style '${2}' is not supported"
else
CUCKOO_DIST_VERSION_DESKTOP_STYLE="$2"
fi
shift 2
;;
--desktop-delete | -z )
CUCKOO_ACTION="desktop"
CUCKOO_DIST_VERSION_DESKTOP="delete"
shift 1
;;
# Arguments
--qemu-system | -Q )
CUCKOO_EMULATOR_ACTION="run-system"
shift 1
;;
--qemu-branch | -B )
CUCKOO_EMULATOR_VERSION="$2"
shift 2
;;
--qemu-arch | -A )
if [ -z "$(valid_value_in_arr "$CUCKOO_EMULATOR_ARCH_LIST" "$2")" ]
then
CUCKOO_EMULATOR_NAME="${CUCKOO_EMULATOR_NAME:=$CUCKOO_EMULATOR_NAME_DEFAULT}"
cuckoo_error "${CUCKOO_EMULATOR_NAME} architecture '${2}' is not supported"
else
CUCKOO_EMULATOR_ARCH="$2"
fi
shift 2
;;
--qemu-os-name | -O )
if [ -z "$(valid_value_in_arr "$CUCKOO_EMULATOR_OS_LIST" "$2")" ]
then
CUCKOO_EMULATOR_NAME="${CUCKOO_EMULATOR_NAME:=$CUCKOO_EMULATOR_NAME_DEFAULT}"
cuckoo_error "${CUCKOO_EMULATOR_NAME} OS '${2}' is not supported"
else
CUCKOO_EMULATOR_OS="$2"
fi
shift 2
;;
--arch | -a )
if [ -z "$(valid_value_in_arr "$CUCKOO_ARCH_LIST" "$2")" ]
then
cuckoo_error "OS architecture '${2}' is not supported"
else
CUCKOO_ARCH="$2"
fi
shift 2
;;
--os-name | -o )
if [ -z "$(valid_value_in_arr "$CUCKOO_OS_LIST" "$2")" ]
then
cuckoo_error "OS '${2}' is not supported"
else
CUCKOO_OS="$2"
fi
shift 2
;;
--dist-version | -v )
if [ -z "$(cuckoo_dist_version_value_check "$2")" ]
then
cuckoo_error "Invalid distributive/version '${2}'"
else
CUCKOO_DIST_VERSION="$2"
fi
shift 2
;;
--memory-size | -m )
CUCKOO_MEMORY_SIZE="$2"
shift 2
;;
--cpu-cores | -K )
if [ $2 -ge $CUCKOO_CPU_MIN ] && [ $2 -le $CUCKOO_CPU_CORES_MAX ]
then
CUCKOO_CPU_CORES=$2
else
cuckoo_error "Invalid number of CPU cores '${2}'"
fi
shift 2
;;
--cpu-threads | -T )
if [ $2 -ge $CUCKOO_CPU_MIN ] && [ $2 -le $CUCKOO_CPU_THREADS_MAX ]
then
CUCKOO_CPU_THREADS=$2
else
cuckoo_error "Invalid number of CPU threads '${2}'"
fi
shift 2
;;
--cpu-sockets | -S )
if [ $2 -ge $CUCKOO_CPU_MIN ] && [ $2 -le $CUCKOO_CPU_SOCKETS_MAX ]
then
CUCKOO_CPU_SOCKETS=$2
else
cuckoo_error "Invalid number of CPU sockets '${2}'"
fi
shift 2
;;
--cdrom-add | -C )
if [ -f "$2" ]
then
CUCKOO_CDROM_ADD_FILE="$2"
else
cuckoo_error "CDROM file '${2}' does not exist, so it can not be added"
fi
shift 2
;;
--cdrom-boot | -c )
if [ -f "$2" ]
then
CUCKOO_CDROM_BOOT_FILE="$2"
else
cuckoo_error "CDROM file '${2}' does not exist"
fi
shift 2
;;
--floppy-boot | -f )
if [ -f "$2" ]
then
CUCKOO_FLOPPY_BOOT_FILE="$2"
else
cuckoo_error "Floppy Disk file '${2}' does not exist"
fi
shift 2
;;
--smb-dir | -M )
if [ -d "$2" ]
then
CUCKOO_SMB_DIR="$2"
else
cuckoo_error "SMB directory '${2}' does not exist"
fi
shift 2
;;
--full-screen | -F )
CUCKOO_FULL_SCREEN="yes"
shift 1
;;
--no-daemonize | -N )
CUCKOO_DAEMONIZE_NO="yes"
shift 1
;;
--hd-type | -t )
if [ -z "$(valid_value_in_arr "$CUCKOO_EMULATOR_HD_TYPE_LIST" "$2")" ]
then
cuckoo_error "HD type '${2}' is not supported"
else
CUCKOO_HD_TYPE="$2"
fi
shift 2
;;
--opts-add | -R )
CUCKOO_OPTS_EXT="$2"
shift 2
;;
# NOTE(review): -V is declared with a required argument in ARGS_SHORT
# ('V:') and is matched by the earlier '--qemu-version | -V' arm, so
# this arm is reachable only via the long '--version' option.
--version | -V )
CUCKOO_ENV_NO="yes"
cuckoo_variables
echo "Cuckoo version: $(cat "${CUCKOO_ETC_VERSION_FILE}")"
exit 0
;;
--help | -h )
cuckoo_help
exit 0
;;
* )
cuckoo_error "Invalid option(s)"
;;
esac
done
}
|
magenete/cuckoo
|
cuckoo/lib/mgt/argv.sh
|
Shell
|
gpl-3.0
| 10,152 |
#!/bin/bash
#
# Build a SeaBIOS payload for Apollo Lake ChromeOS devices and package it
# into a CBFS image ready for flashing.
set -e
rm -rf ./out
cp configs/.config-apl-cros .config
make EXTRAVERSION=-MrChromebox-`date +"%Y.%m.%d"`
filename="seabios-apl-mrchromebox_`date +"%Y%m%d"`.bin"
# Create the CBFS container and populate it. The source text had every
# occurrence of the output image replaced by "$(unknown)" (which would try
# to execute a nonexistent command); the defined-but-unused $filename
# variable is clearly the intended target and is restored here.
cbfstool "${filename}" create -m x86 -s 0x00200000
cbfstool "${filename}" add-payload -f ./out/bios.bin.elf -n payload -b 0x0 -c lzma
cbfstool "${filename}" add -f ./out/vgabios.bin -n vgaroms/seavgabios.bin -t optionrom
cbfstool "${filename}" add -f ~/dev/coreboot/cbfs/bootorder.emmc.apl -n bootorder -t raw
cbfstool "${filename}" add-int -i 3000 -n etc/boot-menu-wait
cbfstool "${filename}" print
md5sum "${filename}" > "${filename}.md5"
mv "${filename}"* ~/dev/firmware/
|
MattDevo/SeaBIOS
|
build-apl-cros.sh
|
Shell
|
gpl-3.0
| 633 |
# Compile the Gauss solver modules to object files, then link them into the
# default executable (a.out).
gfortran -c gauss_converge.f95 gauss_tools.f95
gfortran gauss_converge.o gauss_tools.o
|
wcdawn/matrix_math
|
fortran_system_solve/quick_build.sh
|
Shell
|
gpl-3.0
| 86 |
# Resolve directories relative to this util script: the hooks source tree
# and the JSON payload fixtures used by the tests.
util_dir="$(dirname $(readlink -f $BASH_SOURCE))"
hookit_dir="$(readlink -f ${util_dir}/../../src)"
payloads_dir=$(readlink -f ${util_dir}/../payloads)
# payload <name>: print the JSON fixture payloads/<name>.json on stdout.
payload() {
cat ${payloads_dir}/${1}.json
}
# run_hook <container> <hook> <payload-json>:
# execute a nanobox hook inside the given container, passing the JSON
# payload as the hook's single argument.
run_hook() {
container=$1
hook=$2
payload=$3
docker exec \
$container \
/opt/nanobox/hooks/$hook "$payload"
}
# start_container <name> <ip>:
# launch a privileged postgresql test container on the 'nanobox' network
# with the hooks source tree mounted at /opt/nanobox/hooks.
# NOTE(review): relies on $VERSION being set by the caller's environment --
# confirm against the test harness.
start_container() {
name=$1
ip=$2
docker run \
--name=$name \
-d \
-e "PATH=$(path)" \
--privileged \
--net=nanobox \
--ip=$ip \
--volume=${hookit_dir}/:/opt/nanobox/hooks \
nanobox/postgresql:$VERSION
}
# stop_container <name-or-id>: stop the container, then remove it.
stop_container() {
docker stop $1
docker rm $1
}
# Build the PATH string used inside the test containers: the fixed list of
# directories below joined with ':' and printed on stdout.
path() {
  paths=(
    "/opt/gonano/sbin"
    "/opt/gonano/bin"
    "/opt/gonano/bin"
    "/usr/local/sbin"
    "/usr/local/bin"
    "/usr/sbin"
    "/usr/bin"
    "/sbin"
    "/bin"
  )
  # Join with printf instead of the manual separator-tracking loop,
  # then trim the trailing ':'.
  path=$(printf '%s:' "${paths[@]}")
  path=${path%:}
  echo "$path"
}
|
nanobox-io/nanobox-hooks-postgresql
|
test/util/docker.sh
|
Shell
|
mpl-2.0
| 966 |
#!/bin/bash -ex
# CI build for the citra-web site: install JS dependencies, then run the
# gulp/hugo build into the `build` folder. When EPHEMERAL_BASE_URL is set,
# builds a preview site rooted at that URL instead.
echo -e '\e[1m\e[36m========== Installing gulp & dependencies ==========\e[0m'
yarn install
# Install dependencies one-by-one to avoid race-conditions
yarn --cwd ./scripts/shared-hugo-scripts/wiki/
yarn --cwd ./scripts/shared-hugo-scripts/compatdb/
yarn hugo version
echo -e '\e[1m\e[36m========== Starting gulp deploy task ===============\e[0m'
if [[ -n "${EPHEMERAL_BASE_URL}" ]]; then
echo -e "\e[1m\e[36m========== Ephemeral Mode URL: ${EPHEMERAL_BASE_URL} ===============\e[0m"
yarn run gulp all --ephemeral "${EPHEMERAL_BASE_URL}"
else
yarn run build
fi
echo -e '\e[1m\e[32m Success! Site deployed to `build` folder.\e[0m'
|
citra-emu/citra-web
|
.ci/build.sh
|
Shell
|
agpl-3.0
| 659 |
#!/bin/bash
# Run repeated mini-GA sort/adapt experiments.
# Usage: run-mini-ga-sort-adapt-dis.sh [iterations]   (default: 10)
#
# BUG FIX: the original tested [ -z "1" ] -- a literal string, which is
# never empty -- so ITERATIONS was never defaulted and running without an
# argument left it empty. Test the first positional parameter instead.
if [ -z "$1" ]
then
ITERATIONS=10
else
ITERATIONS=$1
fi
echo "Running for $ITERATIONS";
for (( i=0; i < $ITERATIONS; i++ ))
do
echo "Iteration $i"
# Reset and re-seed the population (delete run twice, as in the original).
./delete-population.pl
./delete-population.pl
./initial-pop-with-eval.pl conf R-ip128-r64
# Run two GA instances in the background and one in the foreground; the
# iteration advances when the foreground r64 run finishes.
./mini-ga-sort-adapt.pl conf R-ip128-r16 &
./mini-ga-sort-adapt.pl conf R-ip128-r32 &
./mini-ga-sort-adapt.pl conf R-ip128-r64
done
|
JJ/SofEA
|
perl/run-mini-ga-sort-adapt-dis.sh
|
Shell
|
agpl-3.0
| 424 |
#!/bin/sh
# Start the bundled Jetty/Solr server with a fixed 128 MB heap.
MEM=128m
# Project root = parent directory of the directory containing this script.
ROOT_DIR=$(dirname $(cd $(dirname "$0"); pwd););
LIB_DIR="$ROOT_DIR/lib";
CONFIG_DIR="$ROOT_DIR/config/config";
# set monitor mode for resuming java job from background
#set -o monitor
java \
"-Xms$MEM" "-Xmx$MEM" \
-jar "$LIB_DIR/jetty/start.jar"\
-Dsolr.solr.home="$ROOT_DIR/search"\
-Dsolr.data.dir="$ROOT_DIR/data/solr"\
OPTIONS=All\
"$CONFIG_DIR/jetty-solr.xml"
# save pid of java process:
# NOTE(review): java runs in the foreground (no '&'), so $! is empty or
# stale here and this line only executes after java exits. The commented
# 'set -o monitor' / 'fg' lines suggest a backgrounding flow was intended
# but never enabled -- confirm before relying on solr.pid.
echo $! > "$ROOT_DIR/data/solr.pid"
# bring java process to foreground:
#fg
|
emilis/PolicyFeed
|
bin/start-solr.sh
|
Shell
|
agpl-3.0
| 521 |
#!/bin/bash
# Docker housekeeping. With "all" as the first argument, remove every
# container, every image and all dangling volumes; otherwise only stop and
# remove all containers.
# NOTE(review): when nothing matches, $(docker ps -a -q) expands to nothing
# and docker stop/rm run without arguments, printing an error -- harmless
# but noisy.
if [ "$1" == "all" ]
then
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker rmi $(docker images -q)
docker volume rm $(docker volume ls -qf dangling=true)
else
# stop and rm all containers
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
# stop and rm exited containers
# docker stop $(docker ps -q -f status=exited)
# docker rm $(docker ps -q -f status=exited)
fi
|
ttssdev/appflow
|
containers/bin/docker-cleanup.sh
|
Shell
|
agpl-3.0
| 445 |
#!/bin/bash
# Fetch the nossenateurs.fr pages listed in a "5mots" CSV, strip their HTML
# down to text, run highlightTrack.py on each page, and build
# count/proximity-sorted CSVs.
# $1: the CSV (<name>.txt.nossenateurs.fr.5mots.txt); the plain word list
#     <name>.txt is derived from it by stripping the suffix.
DIRBIN="./"$(dirname $0)
CSVMOTS=$1
TXTMOTS=$(echo "$CSVMOTS" | sed 's/\.txt.nossenateurs.fr.5mots.txt/.txt/')
# Output directory: CSV name minus .txt, spaces -> '_', apostrophes removed.
DIRMOTS=$(echo $TXTMOTS | sed 's/.txt//' | sed 's/ /_/g' | sed "s/'//ig")"/html"
mkdir -p "$DIRMOTS"
# Generate one curl+sed fetch command per CSV row (field 3 = id, field 4 =
# URL path) into /tmp/sh.txt; '@' stands in for '"' inside the awk program
# and is swapped back afterwards.
cat "$CSVMOTS" | grep -v Texteloi | grep -v NonObjectPage | awk -F ';' '{print "echo DIRMOTS/"$3".txt ; curl -s http://www.nossenateurs.fr"$4" | sed @s/<[^>]*>/\\n/g@ | sed @s/.amp;lt;/</g@ | sed @s/.amp;gt;/>/g@ | sed @s/<[^>]*>//g@ > @DIRMOTS/"$3".txt@" }' | sed 's/@/"/g' | sed "s@DIRMOTS@$DIRMOTS@g" > /tmp/sh.txt
# Run the fetches (first 1000 files) and highlight each downloaded text;
# highlightTrack.py's stderr (per-file word counts) lands in
# <TXTMOTS>.nbmots.csv.
bash /tmp/sh.txt | head -n 1000 | while read file ; do
echo python $DIRBIN/highlightTrack.py '"'$TXTMOTS'"' $file
python $DIRBIN/highlightTrack.py "$TXTMOTS" $file > $file.html
done 2> "$TXTMOTS"".nbmots.csv"
# Normalise the count file to "id;count", sort both files on the id column,
# then join them and sort numerically by count to produce the final ranking.
cat "$TXTMOTS"".nbmots.csv" | sed 's/;[^0-9][^;]*html./;/' | sed 's/.txt//' | sort -t ';' -k 2,2 > "$TXTMOTS"".nbmots.sorted.csv"
sort -t ';' -k 3,3 "$CSVMOTS" > "$CSVMOTS"".sorted.csv"
join -t ';' -1 2 -2 3 "$TXTMOTS"".nbmots.sorted.csv" "$CSVMOTS"".sorted.csv" | sort -t ';' -k 2,2 -n > "$TXTMOTS"".proximite.csv"
|
regardscitoyens/LobbyTrack
|
compare_from_mots.sh
|
Shell
|
agpl-3.0
| 1,081 |
#!/bin/sh
# Clone the private configuration repository over SSH into ./config
# (requires a GitHub key with access to timetabio/config).
git clone [email protected]:timetabio/config
|
timetabio/code
|
scripts/setup.sh
|
Shell
|
agpl-3.0
| 53 |
#!/bin/bash
# Count untranslated ("") and fuzzy entries under locale/ and exit with the
# total, so the calling check fails whenever translation work is outstanding.
# NOTE(review): exit statuses are truncated mod 256, so totals of exactly
# 256, 512, ... would read as success -- confirm whether callers use the
# number or only zero/non-zero.
empty_translations=$(grep -r '""' locale -c)
empty_translations_count=0
# grep -c prints one "path:count" entry per file; word-splitting the
# unquoted variable iterates over those entries (assumes no spaces in paths).
for line in $empty_translations
do
count=$(cut -d ':' -f2 <<< $line)
empty_translations_count=$((empty_translations_count+count))
echo "empty: $line"
done
# Same accounting for entries marked "fuzzy" by gettext merging.
fuzzy_translations=$(grep -r 'fuzzy' locale -c)
fuzzy_translations_count=0
for line in $fuzzy_translations
do
count=$(cut -d ':' -f2 <<< $line)
fuzzy_translations_count=$((fuzzy_translations_count+count))
echo "fuzzy: $line"
done
total=$(($empty_translations_count+fuzzy_translations_count))
exit $total
|
HelloLily/hellolily
|
checks/git/translations.sh
|
Shell
|
agpl-3.0
| 575 |
#!/bin/bash
# Post-install configuration for an Ephesoft (RedHat) container image:
# point the application at a local MariaDB instance, disable OpenOffice
# autostart, strip sudo from the init script and clean build leftovers.
META_INF_DIR=/opt/Ephesoft/Application/WEB-INF/classes/META-INF
DB_PROPERTIES_FILE=$META_INF_DIR/dcma-data-access/dcma-db.properties
OPEN_OFFICE_CONFIG=$META_INF_DIR/dcma-open-office/open-office.properties
# Record the operating system as RedHat in Ephesoft's config
echo "os_name=redhat" >> /etc/Ephesoft/ephesoft.conf
# Remove all sudo commands from the Ephesoft startup script
sed -i "s/sudo/\ /g" /etc/init.d/ephesoft
# Set OpenOffice autostart to false
sed -i "/openoffice.autoStart=/ s/=true/=false/" $OPEN_OFFICE_CONFIG
# Set MariaDB (mysql protocol, localhost:3306, db/user/pass "ephesoft")
# as the default database
sed -i "/dataSource.url=/ s|=.*|=jdbc:mysql:\/\/localhost:3306\/ephesoft|" $DB_PROPERTIES_FILE
sed -i "/dataSource.username=/ s|=.*|=ephesoft|" $DB_PROPERTIES_FILE
sed -i "/dataSource.password=/ s|=.*|=ephesoft|" $DB_PROPERTIES_FILE
#sed -i "/dataSource.dialect=/ s|=.*|=org.hibernate.dialect.MySQL5InnoDBDialect|" $DB_PROPERTIES_FILE
#sed -i "/dataSource.driverClassName=/ s|=.*|=org.h2.Driver|" $DB_PROPERTIES_FILE
# Create mount point for the shared folder
mkdir /shared
# Add shared loader driver libary - use this folder to mount the host directory that contains
# all drivers and extra jars that you wanna load during the tomcat startup.
#
# You also can use volume to pass to the container the jars that you wanna load.
#
#mkdir /driver
#sed -i "/shared.loader=/ s|=.*|=/driver/*.jar|" /opt/Ephesoft/JavaAppServer/conf/catalina.properties
# Remove caches and installation leftovers to shrink the image
yum clean packages
yum clean headers
yum clean metadata
yum clean dbcache
rm -rf /tmp/*
rm -rf /opt/sources/*
cd /opt/Ephesoft/Dependencies/
rm -rf *.tar.gz \
dependencies_redhat \
dependencies_ubuntu \
svutil.cpp \
UpgradeEphesoft.jar \
MariaDBSetup \
luke
|
gsdenys/ephesoft
|
assets/post-install.sh
|
Shell
|
agpl-3.0
| 1,696 |
#!/bin/bash -x
# Typical usage:
# lockrun -L=/tmp/timit.lock --wait --sleep=60 -- <thisfile> <begin_epoch> <end_epoch>
# Example:
# lockrun -L=/tmp/timit.lock --wait --sleep=60 -- run-continue-timit.sh 1100 1200
# python theanodbn/runners/run_dbn_rambatch.py -c conf/timit/timit_c2001_l6_2048.py --continue-run --finetune-lr=0.6 --finetuned-model-file=../model/timit/best_eta.ep300/timit.finetune.lr0.6.ep300.pkl --finetune-epoch-start=200 --finetune-training-epochs=300 --start-model-file=../model/timit/best_eta.ep200/timit.finetune.lr0.6.ep200.pkl 2>&1 | tee -a ../log/timit/best_eta.ep300/timit_c2001_l6_2048.0.6.log
# Validate epochs: both arguments required and begin < end.
[ "x$1" = "x" ] && { echo "Error: No arg1"; exit 1; }
[ "x$2" = "x" ] && { echo "Error: No arg2"; exit 1; }
[ $1 -ge $2 ] && { echo "Error: arg1 is greater-than-or-equal-to arg2"; exit 1; }
BEGIN=$1
END=$2
# Per-run log and model directories keyed by the end epoch.
mkdir -p ../log/timit/best_eta.ep${END}
mkdir -p ../model/timit/best_eta.ep${END}
LOGFILE="../log/timit/best_eta.ep${END}/timit_c2001_l6_2048.0.6.log"
touch $LOGFILE
# Resume DBN fine-tuning from the BEGIN-epoch model and train up to END,
# teeing all output into the log file.
python theanodbn/runners/run_dbn_rambatch.py -c conf/timit/timit_c2001_l6_2048.py --continue-run --finetune-lr=0.6 --finetuned-model-file=../model/timit/best_eta.ep${END}/timit.finetune.lr0.6.ep${END}.pkl --finetune-epoch-start=${BEGIN} --finetune-training-epochs=${END} --start-model-file=../model/timit/best_eta.ep${BEGIN}/timit.finetune.lr0.6.ep${BEGIN}.pkl 2>&1 | tee -a ${LOGFILE}
# Write-protect the produced model and log so a rerun cannot clobber them.
chmod -w ../model/timit/best_eta.ep${END}/timit.finetune.lr0.6.ep${END}.pkl
chmod -w ../log/timit/best_eta.ep${END}/timit_c2001_l6_2048.0.6.log
|
jraman/gpupetuum
|
src/util/run-continue-timit.sh
|
Shell
|
lgpl-2.1
| 1,532 |
#!/bin/bash
# Build source and wheel distributions, then run the project's packer.
# Uncomment the set lines below for fail-fast / traced execution.
# set -e
# set -x
python setup.py sdist bdist_wheel
python pack.py
|
Neutree/COMTool
|
tool/test.sh
|
Shell
|
lgpl-3.0
| 81 |
#!/bin/bash
#arkExportMapForSave.sh
#script for correctly and safely exporting Ark saved data while running
#
# Usage: arkExportMapForSave.sh <save-name>
# <save-name> may contain only letters, digits, '-' and '_'.

#if incorrect number of arguments passed
if [ $# -ne 1 ]; then
echo "No arguments supplied"
exit 1
fi
#check if argument is appropriate (whitelist: alphanumerics, '-', '_')
if [[ "$1" =~ [^a-zA-Z0-9-_] ]]; then
echo "INVALID"
exit 1
fi
baseDir="/home/zach/Servers/Ark_Server/ShooterGame/SavedMaps"
saveDir="$baseDir/$1"
# mkdir -p creates the directory only when missing; it replaces the manual
# existence check + cd + mkdir sequence, which never verified that the cd
# succeeded.
mkdir -p -- "$saveDir"
#backup maps
# Guard the cd: without it, a failed cd would make the cp commands copy
# from (and into) whatever the current directory happens to be.
cd ~/Servers/Ark_Server/ShooterGame || exit 1
cp -r Content/Mods "SavedMaps/$1"
cp -r Saved "SavedMaps/$1"
echo -e "\e[42m Map Saved! \e[0m"
exit 0
|
Pikachar2/ServerController
|
Ark Scripts/arkExportMapForSave.sh
|
Shell
|
lgpl-3.0
| 698 |
#!/bin/bash
#
# Check All IPMI Sensors
#
# Nagios-style check. Prints one status line and exits:
#   0 - every sensor reports 'ok' (or we are inside a VM, nothing to check)
#   1 - not run as root
#   2 - ipmitool missing, or at least one sensor not 'ok'

# IPMI is physical-hardware only; on a hypervisor guest report OK and leave.
if grep -Eq 'flags.*hypervisor' /proc/cpuinfo; then
    echo "OK. This is a virtual machine"
    exit 0
fi
# Talking to the BMC requires root.
if [[ $EUID -ne 0 ]]; then
    echo "This script must be run as root" 1>&2
    exit 1
fi
# 'command -v' is the portable replacement for 'which'.
ipmitool_bin=$(command -v ipmitool 2> /dev/null)
if [[ -z "$ipmitool_bin" ]]; then
    echo "ERROR: No such ipmitool command, try 'yum install ipmitool'."
    exit 2
fi
# Normalize 'sensor list' output into 'name|value|unit|status|' records:
# squeeze spaces around '|', turn remaining spaces into '_', and drop
# sensors with no reading (ns/nc/na) as well as discrete sensors.
ipmi_sensor_list=$(${ipmitool_bin} sensor list | grep -v 'command failed' | awk -F '|' '{print $1"|"$2"|"$3"|"$4"|"}' | sed -e 's/ *| */|/g' -e 's/ /_/g' | grep -Evw 'ns|nc|na|discrete')
ipmi_sensor_count=$(echo ${ipmi_sensor_list} | xargs -n 1 | wc -l)
# Accumulate every not-ok sensor into a single CRIT message.
crit_msg=""
for ipmi_sensor_item in ${ipmi_sensor_list}; do
    ipmi_sensor_name=$(echo ${ipmi_sensor_item} | cut -d'|' -f1)
    ipmi_sensor_value=$(echo ${ipmi_sensor_item} | cut -d'|' -f2)
    ipmi_sensor_unit=$(echo ${ipmi_sensor_item} | cut -d'|' -f3)
    ipmi_sensor_status=$(echo ${ipmi_sensor_item} | cut -d'|' -f4)
    if [[ ${ipmi_sensor_status} != 'ok' ]]; then
        if [[ -z "${crit_msg}" ]]; then
            crit_msg="${ipmi_sensor_name} is ${ipmi_sensor_value} ${ipmi_sensor_unit}"
        else
            crit_msg="${crit_msg}, ${ipmi_sensor_name} is ${ipmi_sensor_value} ${ipmi_sensor_unit}"
        fi
    fi
done
if [[ -z "${crit_msg}" ]]; then
    echo "OK. All ${ipmi_sensor_count} Sensors are OK"
    exit 0
else
    echo "CRIT. ${crit_msg}"
    exit 2
fi
|
WZQ1397/automatic-repo
|
shell/system/check_ipmi_sensor_summary.sh
|
Shell
|
lgpl-3.0
| 1,365 |
#!/bin/bash
#
# Model adaptation:
#
# ./speech_sentences.py -l de gspv2
# cut -f 1 -d ' ' data/models/kaldi-generic-de-tdnn_f-latest/data/local/dict/lexicon.txt >vocab.txt
# lmplz -o 4 --prune 0 1 2 3 --limit_vocab_file vocab.txt --interpolate_unigrams 0 <data/dst/text-corpora/gspv2.txt >lm.arpa
# ./speech_kaldi_adapt.py data/models/kaldi-generic-de-tdnn_f-latest dict-de.ipa lm.arpa gspv2
# pushd data/dst/asr-models/kaldi/gspv2
# bash run-adaptation.sh
# popd
# MODEL='kaldi-generic-de-tdnn_f-latest'
MODEL='kaldi-gspv2-adapt-r20190329'
# Number of parallel review slices (-s); one worker per offset (-o).
NSLICES=12
# Fan out one auto_review worker per slice, each writing its results to
# tmp/resNN.csv (two-digit, zero-padded, matching the original file names),
# then wait for all of them to finish.
for (( offset = 0; offset < NSLICES; offset++ )); do
    printf -v resfile 'tmp/res%02d.csv' "$offset"
    ./auto_review.py -m "${MODEL}" -s "${NSLICES}" -o "${offset}" -R "${resfile}" gspv2 &
done
wait
echo "to apply the review result, run:"
echo "./apply_review.py -l de gspv2 tmp/*.csv"
|
gooofy/speech
|
12review.sh
|
Shell
|
lgpl-3.0
| 1,432 |
#!/bin/bash
# Refresh the package index before touching anything.
echo -n "# Update package index files "
apt-get update
# Apply all pending upgrades and repair any broken dependencies.
echo -n "# Install updates "
apt-get dist-upgrade -y
apt-get upgrade -y
apt-get install -f
# Drop cached archives, orphaned packages, and the leftover config files
# of removed-but-not-purged ("rc" state) packages.
echo -n "# Remove unused packages "
apt-get autoclean
apt-get autoremove -y
apt-get clean
dpkg --purge $(COLUMNS=300 dpkg -l "*" | grep -E "^rc" | cut -d" " -f3)
echo -n "# Update script completed "
|
Croxarens/bashy
|
system-apt-update.sh
|
Shell
|
unlicense
| 357 |
#!/bin/bash
# Bootstrap a dev machine (macOS or Linux): package manager, zsh/oh-my-zsh,
# vim + pathogen, symlinked dotfiles, and a handful of CLI tools.
# Re-runnable; exits early after installing zsh so it can be rerun under zsh.
set -xe
# TODO: this whole thing seemed like a good idea at first but now I get the feeling that ansible would be better for this.
# ssh keys + config in place?
(test -e ~/.ssh/id_rsa.pub && grep jaXVXHL ~/.ssh/id_rsa.pub &> /dev/null) || (echo "are your ssh keys in place duder?" && exit 1)
# Absolute path of this script's directory (portable pwd fallback chain).
mypath=$(exec 2>/dev/null;cd -- $(dirname "$0"); unset PWD; /usr/bin/pwd || /bin/pwd || pwd)
distro=$(uname -s)
# Homebrew / apt basics.
if [ "$distro" = "Darwin" ]; then
test -x "$(command -v brew)" || echo "installing homebrew...." && bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)";
brew update;
echo "Attempting to prompt to install xcode CLI tools, or print out installed location of tools. An error here is not catastrophic, relax.";
xcode-select --install || xcode-select -p;
elif [ "$distro" = "Linux" ]; then
sudo apt-get update;
fi
# mac and linux friendly package installation
# Maps package names that differ on Debian (ack -> ack-grep,
# ctags -> exuberant-ctags) before delegating to apt-get or brew.
install() {
local pkg=$1
if [ "$distro" = "Linux" ]; then
case $pkg in
ack)
pkg="ack-grep" ;;
ctags)
pkg="exuberant-ctags"
esac
sudo apt-get install -y $pkg
elif [ "$distro" = "Darwin" ]; then
brew install $pkg
fi
}
test -x "$(command -v git)" || install git
# Installs oh-my-zsh
# If the current shell is not zsh yet, install it and bail out; the user
# reruns this script from zsh afterwards.
if [[ $SHELL != *"zsh"* ]]; then
echo "going to install zsh, hold on to your butt"
install zsh
sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
echo "zsh installed. rerun this ($0) now."
exit 0
fi
# Update submodules in this repo
git submodule update --init
# Pathogen (bundle management for vim)
if ! [ -d ~/.vim/autoload ]; then
mkdir -p ~/.vim/autoload
curl -LSso ~/.vim/autoload/pathogen.vim https://tpo.pe/pathogen.vim
fi
# Linking up zsh / vim things.
ln -snf "$mypath/.vimrc" ~/.
if ! [ -L ~/.zshrc ]; then
rm -f ~/.zshrc
ln -snf "$mypath/.zshrc" ~/.
fi
ln -snf "$mypath/.vim/bundle" ~/.vim/.
ln -snf "$mypath/.gitconfig" ~/.
ln -snf "$mypath/themes/spaceship-zsh-theme/spaceship.zsh-theme" ~/.oh-my-zsh/themes/.
test -L ~/.oh-my-zsh/custom/plugins || (rm -rf ~/.oh-my-zsh/custom/plugins && ln -s "$mypath/plugins" ~/.oh-my-zsh/custom/.)
# ctags for vim leetness
install ctags
test -L ~/.ctags || ln -s "$mypath/.ctags" ~/.
# ack for greping shiet
test -x "$(command -v ack)" || install ack
# tmux!
test -x "$(command -v tmux)" || install tmux
# this is on the $PATH in .zshrc, is where i put built shit
mkdir -p ~/.local
# Python and its package manager
# TODO: maybe put this behind a "do u want python? y/n"
install python
install nvm
# brew installs pip w/ python, apt-get does not.
test -x "$(command -v pip)" || ([ "$distro" = "Linux" ] && sudo apt-get install -y python-pip) || [ "$distro" = "Darwin" ]
test -x "$(command -v pyflakes)" || pip3 install --user pyflakes
test -x "$(command -v aws)" || pip3 install --user awscli
if [ "$distro" = "Darwin" ]; then
# set insanely high key repeat value in Mac. aint got time for slow shiet!
defaults write NSGlobalDomain KeyRepeat -int 2
brew install vim
brew install diff-so-fancy
git config --global core.pager "diff-so-fancy | less --tabs=4 -RFX"
# TODO: can probably import iterm2 preferences via plist files. steal from https://github.com/mitsuhiko/dotfiles/tree/master/iterm2
echo "gonna run java so you can open oracle site and download the JRE and JDK. manually. like the pitiful human that you are."
java -version
echo "go set JAVA_HOME in .zshrc"
echo "import the color palette into iterm"
fi
|
filmaj/dotfiles
|
install.sh
|
Shell
|
unlicense
| 3,641 |
#!/bin/sh
# Start the blink loop in a detached screen session whose name carries the
# current time (HHMMSS) so repeated launches get unique session names.
# 'date +%H%M%S' replaces the original date|tr|tr|tr|awk pipeline, which
# only extracted the hour/minute/second fields of 'date -Iseconds'.
T=$(date +%H%M%S)
screen -AdmS blink_$T /root/blink/while.sh
|
kirr2/MR3020
|
projects/blink/run.sh
|
Shell
|
unlicense
| 129 |
#!/bin/bash -f
# Vivado (TM) v2016.4 (64-bit)
#
# Filename : output_fifo.sh
# Simulator : Aldec Riviera-PRO Simulator
# Description : Simulation script for compiling, elaborating and verifying the project source files.
# The script will automatically create the design libraries sub-directories in the run
# directory, add the library logical mappings in the simulator setup file, create default
# 'do/prj' file, execute compilation, elaboration and simulation steps.
#
# Generated by Vivado on Fri Mar 31 09:04:51 -0400 2017
# IP Build 1755317 on Mon Jan 23 20:30:07 MST 2017
#
# usage: output_fifo.sh [-help]
# usage: output_fifo.sh [-lib_map_path]
# usage: output_fifo.sh [-noclean_files]
# usage: output_fifo.sh [-reset_run]
#
# Prerequisite:- To compile and run simulation, you must compile the Xilinx simulation libraries using the
# 'compile_simlib' TCL command. For more information about this command, run 'compile_simlib -help' in the
# Vivado Tcl Shell. Once the libraries have been compiled successfully, specify the -lib_map_path switch
# that points to these libraries and rerun export_simulation. For more information about this switch please
# type 'export_simulation -help' in the Tcl shell.
#
# You can also point to the simulation libraries by either replacing the <SPECIFY_COMPILED_LIB_PATH> in this
# script with the compiled library directory path or specify this path with the '-lib_map_path' switch when
# executing this script. Please type 'output_fifo.sh -help' for more information.
#
# Additional references - 'Xilinx Vivado Design Suite User Guide:Logic simulation (UG900)'
#
# ********************************************************************************************************
# Script info
echo -e "output_fifo.sh - Script generated by export_simulation (Vivado v2016.4 (64-bit)-id)\n"
# Main steps
# Top-level flow: validate CLI args, set up the compiled-library mapping,
# then compile the design and run the simulation.
run()
{
check_args $# $1
setup $1 $2
compile
simulate
}
# RUN_STEP: <compile>
compile()
{
# Compile design files
# Runs the generated compile.do in the current shell, teeing all output
# into compile.log for later inspection.
source compile.do 2>&1 | tee -a compile.log
}
# RUN_STEP: <simulate>
simulate()
{
# Launch the Riviera-PRO batch simulator with simulate.do, logging to
# simulate.log.
runvsimsa -l simulate.log -do "do {simulate.do}"
}
# STEP: setup
# Dispatch on the optional first CLI switch:
#   -lib_map_path <dir> : copy library.cfg from the precompiled library dir
#   -reset_run          : wipe files generated by a previous run, then exit
#   -noclean_files      : keep previous run data untouched
#   (anything else)     : default setup; copy library.cfg if a path is given
setup()
{
case $1 in
"-lib_map_path" )
if [[ ($2 == "") ]]; then
echo -e "ERROR: Simulation library directory path not specified (type \"./output_fifo.sh -help\" for more information)\n"
exit 1
fi
copy_setup_file $2
;;
"-reset_run" )
reset_run
echo -e "INFO: Simulation run files deleted.\n"
exit 0
;;
"-noclean_files" )
# do not remove previous data
;;
* )
copy_setup_file $2
esac
# Add any setup/initialization commands here:-
# <user specific commands>
}
# Copy library.cfg file
# Copies the simulator's library.cfg from the compiled-library directory
# ($1) into the current run directory. When $1 is empty or missing, keeps
# the placeholder path and prints an error (the subsequent cp then fails
# visibly, matching the generated script's original behavior).
copy_setup_file()
{
file="library.cfg"
lib_map_path="<SPECIFY_COMPILED_LIB_PATH>"
if [[ ($1 != "" && -e $1) ]]; then
lib_map_path="$1"
else
# Fixed: the message previously referenced "./top.sh" (a different
# generated script) and its inner quotes were not escaped.
echo -e "ERROR: Compiled simulation library directory path not specified or does not exist (type \"./output_fifo.sh -help\" for more information)\n"
fi
if [[ ($lib_map_path != "") ]]; then
src_file="$lib_map_path/$file"
cp $src_file .
fi
}
# Delete generated data from the previous run
# Removes every log/database/work artifact a prior compile/simulate run
# may have left in the current directory.
reset_run()
{
local artifacts=(compile.log elaboration.log simulate.log dataset.asdb work riviera)
local artifact
for artifact in "${artifacts[@]}"; do
if [[ -e $artifact ]]; then
rm -rf $artifact
fi
done
}
# Check command line arguments
# $1 is the argument count, $2 the first argument. A single argument must
# be one of the known switches; -help/-h prints usage (which exits).
check_args()
{
if (( $1 == 1 )); then
case $2 in
"-lib_map_path" | "-noclean_files" | "-reset_run" | "-help" | "-h" )
;;
* )
echo -e "ERROR: Unknown option specified '$2' (type \"./output_fifo.sh -help\" for more information)\n"
exit 1
;;
esac
fi
if [[ ($2 == "-help" || $2 == "-h") ]]; then
usage
fi
}
# Script usage
# Prints the help text and exits with status 1 (also used as the
# error-exit path for bad invocations).
usage()
{
msg="Usage: output_fifo.sh [-help]\n\
Usage: output_fifo.sh [-lib_map_path]\n\
Usage: output_fifo.sh [-reset_run]\n\
Usage: output_fifo.sh [-noclean_files]\n\n\
[-help] -- Print help information for this script\n\n\
[-lib_map_path <path>] -- Compiled simulation library directory path. The simulation library is compiled\n\
using the compile_simlib tcl command. Please see 'compile_simlib -help' for more information.\n\n\
[-reset_run] -- Recreate simulator setup files and library mappings for a clean run. The generated files\n\
from the previous run will be removed. If you don't want to remove the simulator generated files, use the\n\
-noclean_files switch.\n\n\
[-noclean_files] -- Reset previous run, but do not remove simulator generated files from the previous run.\n\n"
echo -e $msg
exit 1
}
# Launch script
# Entry point. The arguments are deliberately unquoted so that absent
# CLI args disappear instead of becoming empty strings (check_args
# relies on the resulting argument count).
run $1 $2
|
TWW12/lzw
|
ip_repo/edit_axi_compression_v1_0.ip_user_files/sim_scripts/output_fifo/riviera/output_fifo.sh
|
Shell
|
unlicense
| 4,823 |
# zsh completion-system (compsys) and vcs_info styling.
# man zshcontrib
zstyle ':vcs_info:*' actionformats '%F{5}(%f%s%F{5})%F{3}-%F{5}[%F{2}%b%F{3}|%F{1}%a%F{5}]%f '
zstyle ':vcs_info:*' formats '%F{5}(%f%s%F{5})%F{3}-%F{5}[%F{2}%b%F{5}]%f '
zstyle ':vcs_info:*' enable git #svn cvs
# Enable completion caching, use rehash to clear
zstyle ':completion::complete:*' use-cache on
zstyle ':completion::complete:*' cache-path ~/.zsh/cache/$HOST
# Make the list prompt friendly
zstyle ':completion:*' list-prompt '%SAt %p: Hit TAB for more, or the character to insert%s'
# Make the selection prompt friendly when there are a lot of choices
zstyle ':completion:*' select-prompt '%SScrolling active: current selection at %p%s'
# Add simple colors to kill
zstyle ':completion:*:*:kill:*:processes' list-colors '=(#b) #([0-9]#) ([0-9a-z-]#)*=01;34=0=01'
# list of completers to use
zstyle ':completion:*::::' completer _expand _complete _ignored _approximate
zstyle ':completion:*' menu select=1 _complete _ignored _approximate
# insert all expansions for expand completer
# zstyle ':completion:*:expand:*' tag-order all-expansions
# match uppercase from lowercase
zstyle ':completion:*' matcher-list 'm:{a-z}={A-Z}'
# offer indexes before parameters in subscripts
zstyle ':completion:*:*:-subscript-:*' tag-order indexes parameters
# formatting and messages
zstyle ':completion:*' verbose yes
zstyle ':completion:*:descriptions' format '%B%d%b'
zstyle ':completion:*:messages' format '%d'
zstyle ':completion:*:warnings' format 'No matches for: %d'
zstyle ':completion:*:corrections' format '%B%d (errors: %e)%b'
zstyle ':completion:*' group-name ''
# ignore completion functions (until the _ignored completer)
zstyle ':completion:*:functions' ignored-patterns '_*'
# ssh/scp: complete hosts and users in a sensible order
zstyle ':completion:*:scp:*' tag-order files users 'hosts:-host hosts:-domain:domain hosts:-ipaddr"IP\ Address *'
zstyle ':completion:*:scp:*' group-order files all-files users hosts-domain hosts-host hosts-ipaddr
zstyle ':completion:*:ssh:*' tag-order users 'hosts:-host hosts:-domain:domain hosts:-ipaddr"IP\ Address *'
zstyle ':completion:*:ssh:*' group-order hosts-domain hosts-host users hosts-ipaddr
zstyle '*' single-ignored show
# pasting with tabs doesn't perform completion
zstyle ':completion:*' insert-tab pending
|
megalithic/bits-and-bobs
|
zsh/completion.zsh
|
Shell
|
unlicense
| 2,242 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
# Build/packaging context for this NetBeans-generated package step
# (Debug configuration, GNU-Linux platform).
TOP=`pwd`
CND_PLATFORM=GNU-Linux
CND_CONF=Debug
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=so
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/tutorial23
OUTPUT_BASENAME=tutorial23
PACKAGE_TOP_DIR=tutorial23/
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
# Stage the built binary under tutorial23/bin inside the temp packaging dir.
cd "${TOP}"
makeDirectory "${NBTMPDIR}/tutorial23/bin"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755
# Generate tar file
# Repack the staged tree as dist/.../package/tutorial23.tar.
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/tutorial23.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/tutorial23.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
TennisGazelle/cs791ALopez
|
ogldev-source/tutorial23/nbproject/Package-Debug.bash
|
Shell
|
apache-2.0
| 1,457 |
#!/bin/bash
# Creates the mapping for any and all new indexes
# Installs an index template named "nodesstats" on the local Elasticsearch
# node. It applies to every index matching "nodesstats*": 3 shards /
# 1 replica, _all disabled, any field ending in "timestamp" mapped as a
# date, and all other string fields stored not_analyzed.
# (The JSON below is inside one double-quoted argument; do not edit its
# escaping casually.)
curl -XPUT "http://localhost:9200/_template/nodesstats" -d "
{
\"order\" : 0,
\"template\": \"nodesstats*\",
\"settings\" : {
\"number_of_shards\" : 3,
\"number_of_replicas\" : 1
},
\"mappings\" : {
\"node_stats\" : {
\"_all\" : {\"enabled\" : false},
\"dynamic_templates\" : [
{
\"timestamps_as_date\" : {
\"match_pattern\" : \"regex\",
\"path_match\" : \".*timestamp\",
\"mapping\" : {
\"type\" : \"date\"
}
}
},
{
\"strings_not_analyzed\" : {
\"match\" : \"*\",
\"match_mapping_type\" : \"string\",
\"mapping\" : {
\"type\" : \"string\",
\"index\" : \"not_analyzed\"
}
}
}
]
}
}
}"
|
jpodeszwik/elasticsearch-monitoring
|
scripts/elasticsearch-nodesstats-template.sh
|
Shell
|
apache-2.0
| 1,083 |
#!/bin/bash
#
# Output Cassandra cluster and pod status
# Runs 'nodetool status' inside the first healthy Cassandra pod, then
# prints one line per pod pairing the decoded C* node state with the
# kubectl pod listing.
find_cassandra_pods="kubectl get pods -l name=cassandra"
# First pod that is Running with 1/1 containers ready; used to execute
# nodetool for the whole cluster.
first_running_seed=$($find_cassandra_pods --no-headers | \
grep Running | \
grep 1/1 | \
head -1 | \
awk '{print $1}')
cluster_status=$(kubectl exec $first_running_seed \
-c cassandra \
-- nodetool status -r)
echo
echo " C* Node Kubernetes Pod"
echo " ------- --------------"
# For each pod line, look up its two-letter nodetool code (e.g. "UN")
# and expand it to a readable "Up|Normal"-style status.
while read -r line; do
node_name=$(echo $line | awk '{print $1}')
status=$(echo "$cluster_status" | grep $node_name | awk '{print $1}')
long_status=$(echo "$status" | \
sed 's/U/ Up/g' | \
sed 's/D/Down/g' | \
sed 's/N/|Normal /g' | \
sed 's/L/|Leaving/g' | \
sed 's/J/|Joining/g' | \
sed 's/M/|Moving /g')
: ${long_status:=" "}
echo "$long_status $line"
done <<< "$($find_cassandra_pods)"
echo
|
vyshane/cassandra-kubernetes
|
cassandra-status.sh
|
Shell
|
apache-2.0
| 876 |
# To run we need to specify the modules path and the entry module name
# Requires JAVA9_HOME to point at a JDK 9+ installation (-p/-m are the
# module-path and module flags introduced in Java 9).
$JAVA9_HOME/bin/java -p output/mlibs -m simplecalculator
|
wesleyegberto/java-new-features
|
java-9/modules/simple_deps/run_module_calculator.sh
|
Shell
|
apache-2.0
| 127 |
#!/bin/bash
# Rebuild the React app from a clean slate, then serve the build output
# locally with Python's built-in HTTP server.
set -e
rm -rf build
npm run build
cd build
python3 -m http.server
cd ..
echo "Ready."
|
codetojoy/gists
|
bash/react_handy_jan_2022/build_and_serve_from_python.sh
|
Shell
|
apache-2.0
| 103 |
#!/bin/sh
# Echo stdin back to stdout, demonstrating that stdout keeps working after
# stdin reaches EOF: the first line is held back and printed last.
# Read the marker line to echo it after stdin is closed
read -r marker_line_after_eof_from_stdin
# Echo all lines received from stdin. printf (not echo) so that lines
# starting with '-' or containing backslashes pass through verbatim.
while read -r line; do printf '%s\n' "$line"; done
# Show that stdout still function after closing stdin
printf '%s\n' "$marker_line_after_eof_from_stdin"
|
tejksat/docker-java
|
docker-java/src/test/resources/closeStdinWithStdinOnce/echo_stdin.sh
|
Shell
|
apache-2.0
| 281 |
#! /bin/bash
# Provisions a CDH4 worker node: Hadoop stack, ecosystem tools, MongoDB
# and the Cloudera Manager agent. Non-interactive (--force-yes -y).
echo "cdh4-node-install.sh: About to install cdh4 packages, mongoDB..."
##updating the sources list first
sudo apt-get update
## installing packages
echo "cdh4-node-install.sh: Installing packages: bigtop-utils bigtop-jsvc bigtop-tomcat hadoop hadoop-hdfs hadoop-httpfs hadoop-mapreduce hadoop-yarn hadoop-client hadoop-0.20-mapreduce hue-plugins hbase hive oozie oozie-client pig zookeeper..."
sudo apt-get -q --force-yes -y install bigtop-utils bigtop-jsvc bigtop-tomcat hadoop hadoop-hdfs hadoop-httpfs hadoop-mapreduce hadoop-yarn hadoop-client hadoop-0.20-mapreduce
sudo apt-get -q --force-yes -y install hue-plugins hbase hive oozie oozie-client pig zookeeper hue flume-ng mahout sqoop2 sqoop2-server
echo "cdh4-node-install.sh: cdh4 packages are installed!..."
# MongoDB pinned to 2.4.4; libmysql-java provides the JDBC driver used
# by Hive/Oozie metastores.
echo "cdh4-node-install.sh: Installing packages: mongodb"
sudo apt-get -q --force-yes -y install mongodb-10gen=2.4.4
sudo apt-get -q --force-yes -y install libmysql-java
echo "cdh4-node-install.sh: About to install cloudera agent"
sudo apt-get -q --force-yes -y install cloudera-manager-agent cloudera-manager-daemons
echo "cdh4-node-install.sh: cloudera agents installed!..."
|
fastconnect/cloudify-cloudera
|
services/cloudera/cdh4-node/scripts-sh/cdh4-node-install.sh
|
Shell
|
apache-2.0
| 1,166 |
#!/usr/bin/env bash
set -eu
# This is a trivial example, but this command could run anything to
# calculate the value.
printf '%s\n' '3.4.5'
|
dnephin/dobi
|
examples/env-vars/print-version.sh
|
Shell
|
apache-2.0
| 131 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generates one fluentd 'tail' source config per path listed in
# $FILES_TO_COLLECT (whitespace-separated), written to
# /etc/google-fluentd/files/<basename>. $FILES_FORMAT selects the parser
# (defaults to 'none').
mkdir -p /etc/google-fluentd/files
if [ -z "$FILES_TO_COLLECT" ]; then
  exit 0
fi
if [ -z "$FILES_FORMAT" ] ; then
  FILES_FORMAT=none
fi
for filepath in $FILES_TO_COLLECT
do
  filename=$(basename $filepath)
  # NOTE(review): the checked-in copy had '$(unknown)' here (which would
  # try to execute a nonexistent command); restored to ${filename} per
  # the upstream fluentd-gcp sidecar generator.
  cat > "/etc/google-fluentd/files/${filename}" << EndOfMessage
<source>
  type tail
  format $FILES_FORMAT
  time_key time
  path ${filepath}
  pos_file /etc/google-fluentd/fluentd-gcp-${filename}.pos
  time_format %Y-%m-%dT%H:%M:%S
  tag file.${filename}
  read_from_head true
</source>
EndOfMessage
done
|
drud/vault-consul-on-kube
|
sidecar_logging_image/config_generator.sh
|
Shell
|
apache-2.0
| 1,119 |
# Run this on MacOS with Xcode from the diplib directory
# set $PYPI_TOKEN to the PyPI token for the diplib project
# This is a modified version of `deploy_macos.sh`, written for macOS on
# an Apple Silicon machine. Homebrew offers only 3.8 and up for these
# machines, and by default installs in a different directory (/opt/homebrew/
# instead of /usr/local/).
# It is assumed that CMake and Xcode are already installed.
# Builds PyDIP wheels for Python 3.8/3.9/3.10, bundles the native libs
# with delocate, and uploads the result to PyPI.
# Setup
export BUILD_THREADS=6
export DELOCATE=`pwd`/tools/travis/delocate
brew install [email protected]
brew install [email protected]
brew install [email protected]
# The install above might have changed the default version of `python3`, so we need to reinstall packages:
python3 -m pip install setuptools wheel twine delocate
brew install wget
mkdir build
cd build
wget https://downloads.openmicroscopy.org/bio-formats/6.5.0/artifacts/bioformats_package.jar
# Basic configuration
# Shared CMake config; per-version reconfigure below only swaps the
# Python interpreter.
cmake .. -DDIP_PYDIP_WHEEL_INCLUDE_LIBS=On -DBIOFORMATS_JAR=`pwd`/bioformats_package.jar -DDIP_BUILD_DIPIMAGE=Off
# Python 3.8
export PYTHON=/opt/homebrew/opt/[email protected]/bin/python3
export PYTHON_VERSION=3.8
cmake .. -DPYBIND11_PYTHON_VERSION=$PYTHON_VERSION -DPYTHON_EXECUTABLE=$PYTHON
make -j $BUILD_THREADS bdist_wheel
python3 $DELOCATE -w wheelhouse/ -v pydip/staging/dist/*.whl
# Python 3.9
export PYTHON=/opt/homebrew/opt/[email protected]/bin/python3
export PYTHON_VERSION=3.9
cmake .. -DPYBIND11_PYTHON_VERSION=$PYTHON_VERSION -DPYTHON_EXECUTABLE=$PYTHON
make -j $BUILD_THREADS bdist_wheel
python3 $DELOCATE -w wheelhouse/ -v pydip/staging/dist/*.whl
# Python 3.10
export PYTHON=/opt/homebrew/opt/[email protected]/bin/python3
export PYTHON_VERSION=3.10
cmake .. -DPYBIND11_PYTHON_VERSION=$PYTHON_VERSION -DPYTHON_EXECUTABLE=$PYTHON
make -j $BUILD_THREADS bdist_wheel
python3 $DELOCATE -w wheelhouse/ -v pydip/staging/dist/*.whl
# Upload to pypi.org
python3 -m twine upload -u __token__ -p $PYPI_TOKEN wheelhouse/*.whl
|
DIPlib/diplib
|
tools/travis/deploy_macos_m1.sh
|
Shell
|
apache-2.0
| 1,909 |
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# Set Environment Variables to install Shoptimizer.
# Please change the values for your project.
# NOTE(review): the placeholder values are now quoted so sourcing this
# file before editing no longer fails on word-splitting ('[PROJECT ID]'
# contains a space). The shebang above is inoperative (not line 1);
# this file is meant to be sourced, not executed.
GCP_PROJECT="[PROJECT ID]"
SOURCE_REPO="[Name of the Git Cloud Source Repository to create]"
|
google/shoptimizer
|
env.sh
|
Shell
|
apache-2.0
| 774 |
#!/bin/bash
# https://github.com/apache/bigtop/tree/master/bigtop-test-framework
# "ITEST" is an integration testing framework written for and by the
# apache bigtop project. It consists of typical java tools/libraries
# such as junit, gradle and maven.
# This script is a helper to run itest on any hadoop system without
# requiring intimate knowledge of bigtop. If running for the first
# time, simply execute ./run_itest.sh without any arguments. If you
# want more information, use these additional parameters:
#
# --info - turns on the log4j output
# --debug - turns up the log4j output to maximum
# --traceback - shows tracebacks from tests
set_hdfs_home() {
# Ensure the current user's HDFS home directory exists and is owned by
# them; creates/chowns it via the 'hdfs' superuser when needed.
HDFS_HOMEDIR="/user/$(whoami)"
# Validate it exists ...
hadoop fs -test -d $HDFS_HOMEDIR
RC=$?
if [ $RC != 0 ];then
echo "Creating hdfs:/$HDFS_HOMEDIR"
su -l hdfs -c "hadoop fs -mkdir $HDFS_HOMEDIR"
fi
# Validate the owner is you ...
# NOTE(review): stats /user/root rather than $HDFS_HOMEDIR — presumably
# this script is expected to run as root; confirm before reusing as
# another user.
OWNER=$(hadoop fs -stat "%u" /user/root)
if [[ "$OWNER" != "$(whoami)" ]]; then
echo "Chowning hdfs:/$HDFS_HOMEDIR"
su -l hdfs -c "hadoop fs -chown -R $(whoami) $HDFS_HOMEDIR"
fi
}
set_java_home() {
#####################################################################
# Use bigtop's bigtop-detect-javahome if JAVA_HOME is not already set
#####################################################################
# Prefers a local checkout of the script; otherwise downloads a copy
# to /tmp and sources it. No-op when JAVA_HOME is already exported.
REPO_URL="https://github.com/apache/bigtop/blob/master"
SCRIPT_PATH="bigtop-packages/src/common/bigtop-utils/bigtop-detect-javahome"
SCRIPT_URL="$REPO_URL/$SCRIPT_PATH"
if [ -z "$JAVA_HOME" ]; then
# Get the javahome script if not already available
if [ ! -f $BIGTOP_HOME/$SCRIPT_PATH ]; then
if [ ! -f /tmp/bigtop-detect-javahome ]; then
curl $SCRIPT_URL -o /tmp/bigtop-detect-javahome
fi
source /tmp/bigtop-detect-javahome
else
source $BIGTOP_HOME/$SCRIPT_PATH
fi
fi
echo "# DEBUG: JAVA_HOME=$JAVA_HOME"
}
set_hadoop_vars() {
#####################################################################
# Set the HADOOP_MAPRED_HOME and HADOOP_CONF vars
#####################################################################
# Tries distro-specific layouts (ODP, HDP, CDH) first; if either var is
# still unset, falls back to scanning 'hadoop classpath' entries.
# ITEST wants the MR dir with the examples jar ...
# java.lang.AssertionError: Can't find hadoop-examples.jar file
if ( [ -z "$HADOOP_MAPRED_HOME" ] || [ -z "$HADOOP_CONF_DIR" ] ); then
# ODP follows an HDP convention ...
if [ -d /usr/odp/current ]; then
echo "# DEBUG: ODP DETECTED"
if ( [ -z "$HADOOP_CONF_DIR" ] && [ -d /etc/hadoop/conf ] ); then
export HADOOP_CONF_DIR=/etc/hadoop/conf
fi
if ( [ -z "$HADOOP_MAPRED_HOME" ] && [ -d /usr/odp/current/hadoop-mapreduce-client ] ); then
export HADOOP_MAPRED_HOME=/usr/odp/current/hadoop-mapreduce-client
fi
# HDP sometimes has client dirs
elif [ -d /usr/hdp/current ]; then
echo "# DEBUG: HDP DETECTED"
if ( [ -z "$HADOOP_CONF_DIR" ] && [ -d /etc/hadoop/conf ] ); then
export HADOOP_CONF_DIR=/etc/hadoop/conf
fi
if ( [ -z "$HADOOP_MAPRED_HOME" ] && [ -d /usr/hdp/current/hadoop-mapreduce-client ] ); then
export HADOOP_MAPRED_HOME=/usr/hdp/current/hadoop-mapreduce-client
fi
# CDH may or may not have been deployed with parcels ...
elif [ -d /opt/cloudera/parcels/CDH/jars ]; then
echo "# DEBUG: CDH DETECTED"
if ( [ -z "$HADOOP_CONF_DIR" ] && [ -d /etc/hadoop/conf ] ); then
export HADOOP_CONF_DIR=/etc/hadoop/conf
fi
if ( [ -z "$HADOOP_MAPRED_HOME" ] ); then
# This is the only dir that contains the examples jars on cdh5.2.x
export HADOOP_MAPRED_HOME=/opt/cloudera/parcels/CDH/jars
fi
fi
fi
if ( [ -z "$HADOOP_MAPRED_HOME" ] || [ -z "$HADOOP_CONF_DIR" ] ); then
echo "# DEBUG: HADOOP_MAPRED_HOME OR HADOOP_CONF not set"
###############################
# Discover non-HDP paths
###############################
# try using "hadoop classpath" output
MAXMR=0
for CP in $(hadoop classpath | tr ':' '\n'); do
# os.path.abspath
CP=$(readlink -e $CP)
if [ -z "$HADOOP_CONF_DIR" ]; then
# HADOOP_CONF_DIR
if ( [[ "$CP" == */conf* ]] && [[ "$CP" == */hadoop/* ]] ); then
if ( [ -d $CP ] && [ -f $CP/core-site.xml ] ); then
export HADOOP_CONF_DIR=$CP
continue
fi
fi
fi
# Fixed: this guard previously tested the misspelled
# 'HADDOOP_MAPRED_HOME', so it was always true and kept
# re-evaluating even after the variable had been chosen.
if [ -z "$HADOOP_MAPRED_HOME" ]; then
# HADOOP_MAPRED_HOME (use the path with the most jars)
JARCOUNT=$(ls $CP/hadoop-mapreduce*.jar 2>/dev/null | wc -l)
if [ $JARCOUNT -gt 0 ]; then
if ( [ $JARCOUNT -gt $MAXMR ] ); then
export HADOOP_MAPRED_HOME=$CP
MAXMR=$JARCOUNT
fi
fi
fi
done
fi
echo "# DEBUG: HADOOP_CONF_DIR=$HADOOP_CONF_DIR"
echo "# DEBUG: HADOOP_MAPRED_HOME=$HADOOP_MAPRED_HOME"
}
print_tests() {
# Dump the HTML test reports produced by each smoke-test suite in
# $ITESTS as plain text (via links) to the console.
echo "######################################################"
echo "# RESULTS #"
echo "######################################################"
for TEST in $(echo $ITESTS | tr ',' '\n'); do
TESTDIR=$BIGTOP_HOME/bigtop-tests/smoke-tests/$TEST/build
if [ -d $TESTDIR ]; then
cd $TESTDIR
for FILE in $(find -L reports/tests/classes -type f -name "*.html"); do
echo "## $TESTDIR/$FILE"
# links -dump renders the HTML report as text.
if [ $(which links) ]; then
links $FILE -dump
else
echo "PLEASE INSTALL LINKS: sudo yum -y install links"
fi
echo ""
done
fi
done
}
# Main flow: prepare HDFS/JAVA/Hadoop env, fetch bigtop, run the selected
# smoke tests through the gradle wrapper, then print the HTML reports.
export ITEST="0.7.0"
export ODP_HOME="/tmp/odp"
# Check the hdfs homedir
set_hdfs_home
# SET BIGTOP_HOME AND GET THE CODE
export BIGTOP_HOME=/tmp/bigtop_home
echo "# DEBUG: BIGTOP_HOME=$BIGTOP_HOME"
if [ ! -d $BIGTOP_HOME ]; then
echo "# DEBUG: cloning $BIGTOP_HOME from github"
git clone --depth 1 https://github.com/apache/bigtop -b branch-1.0 $BIGTOP_HOME
else
echo "# DEBUG: $BIGTOP_HOME already cloned"
fi
# SET JAVA_HOME
set_java_home
# SET HADOOP SERVICE HOMES
set_hadoop_vars
echo "######################################################"
echo "# STARTING ITEST #"
echo "######################################################"
echo "# Use --debug/--info/--stacktrace for more details"
# SET THE DEFAULT TESTS
# Comma-separated suite list; overridable via the ITESTS env var.
if [ -z "$ITESTS" ]; then
#export ITESTS="odp-example,mapreduce"
export ITESTS="mapreduce"
fi
# Link the example odp test into the tests dir
if [ ! -L $BIGTOP_HOME/bigtop-tests/smoke-tests/odp-example ]; then
ln -s $PWD/odp_itest_example $BIGTOP_HOME/bigtop-tests/smoke-tests/odp-example
fi
# CALL THE GRADLE WRAPPER TO RUN THE FRAMEWORK
cd $BIGTOP_HOME/bigtop-tests/smoke-tests/
./gradlew clean test -Dsmoke.tests=$ITESTS $@
# SHOW RESULTS (HTML)
print_tests
|
jctanner/odp-scripts
|
run_itest.sh
|
Shell
|
apache-2.0
| 7,501 |
#!/bin/bash
# *****************************************************************
#
# (C) Copyright IBM Corporation 2018.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *****************************************************************
# Pull in deployment settings (e.g. deploymentName) used below.
source ./deploy.config
# Silently prompt for the xsadmin password; falls back to the documented
# default when the user just presses Enter. Sets the global xsadminPass.
function askForAdminUser() {
echo "For the default XSLD Admin User \"xsadmin\". Enter a password (default: vgwAdmin4xs!): "
read -rs xsadminPass
xsadminPass=${xsadminPass:-vgwAdmin4xs!}
}
# Block until the given XSLD admin task finishes; abort the whole script
# with the supplied error message if the task fails.
#   $1 address  $2 task id  $3 xsadmin password  $4 error message
function waitOnTaskCommand() {
local xsldAddress=$1
local taskID=$2
local xsAdminPassword=$3
local errorMessage=$4
if ! ./wait-on-task-command.sh "${xsldAddress}" "${taskID}" "${xsAdminPassword}"; then
echo "$errorMessage"
exit 1
fi
}
# Join every member pod to the master catalog server: POST a
# NewMemberJoinTaskCommand to each member's admin REST API, extract the
# returned task id, and wait for the task to complete before moving on.
function joinMembers() {
printStep "Joining Members, this may take a while..."
for member in ${memberIPs}; do
echo "	Joining ${member} to ${masterIP}"
taskID=$(curl -s -k -u "xsadmin:${xsadminPass}" "https://${member}:9445/wxsadmin/v1/task" -d "{\"command\": \"NewMemberJoinTaskCommand\",\"description\": \"add a new member\",\"parameters\": {\"secretKey\": \"${secretKey}\",\"memberName\": \"${member}\",\"cacheMemberGroupHost\": \"${masterIP}\",\"isCatalog\": \"true\"}}" -H "Content-Type:application/json" -X POST)
# Response is a tiny JSON object; strip everything but the numeric id.
taskID=$(echo "${taskID}" | cut -d':' -f2 | cut -d'}' -f1)
waitOnTaskCommand "${member}" "${taskID}" "${xsadminPass}" "Joining members failed, try cleaning up your deployment by running ./cleanupsms-xsld.sh, alternatively you can check the dashboard task progress and pull the logs as shown here https://www.ibm.com/support/knowledgecenter/en/SSTVLU_8.6.1/com.ibm.websphere.extremescale.doc/txstraceserverlogxsld.html"
done
printStep "Completed, above mentioned members joined."
echo
}
# Print a progress heading for the named step.
function printStep() {
printf 'Step: %s\n' "$1"
}
# Discover the pod IPs of the XSLD deployment via kubectl.
# Reads global: deploymentName (from deploy.config).
# Sets globals: xsldServerIPs (all pod IPs), masterIP (first pod),
#               memberIPs (remaining pods).
function loadCatalogServerIdentities() {
  # Quote the pattern (and guard with --) so an empty or dash-leading
  # deployment name cannot be word-split or parsed as grep options.
  xsldServerIPs=$(kubectl get pods -o wide | grep -- "${deploymentName}" | awk '{ print $6 }')
  # Intentionally unquoted: collapses the newline-separated IP list onto
  # one space-separated line before splitting with cut.
  masterIP=$(echo ${xsldServerIPs} | cut -d' ' -f1)
  memberIPs=$(echo ${xsldServerIPs} | cut -d' ' -f2-)
}
# Resolve the admin password: prefer the CLI argument, then any value
# already set (e.g. by deploy.config), and finally prompt interactively.
if [ ! -z "$1" ]; then
xsadminPass=$1
elif [ -z "$xsadminPass" ]; then
askForAdminUser
fi
# The join API uses the admin password as the cluster's shared secret.
secretKey=$xsadminPass
loadCatalogServerIdentities
joinMembers
|
WASdev/sample.voice.gateway
|
sms/high-availability/deploy-xsld/join-xsld-members.sh
|
Shell
|
apache-2.0
| 2,814 |
#!/bin/bash
#
# Launch Gerrit Code Review as a daemon process.
# To get the service to restart correctly on reboot, uncomment below (3 lines):
# ========================
# chkconfig: 3 99 99
# description: Gerrit Code Review
# processname: gerrit
# ========================
### BEGIN INIT INFO
# Provides: gerrit
# Required-Start: $named $remote_fs $syslog
# Required-Stop: $named $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start/stop Gerrit Code Review
# Description: Gerrit is a web based code review system, facilitating online code reviews
# for projects using the Git version control system.
### END INIT INFO
# Configuration files:
#
# /etc/default/gerritcodereview
# If it exists, sourced at the start of this script. It may perform any
# sequence of shell commands, like setting relevant environment variables.
#
# The files will be checked for existence before being sourced.
# Configuration variables. These may be set in /etc/default/gerritcodereview.
#
# GERRIT_SITE
# Path of the Gerrit site to run. $GERRIT_SITE/etc/gerrit.config
# will be used to configure the process.
#
# GERRIT_WAR
# Location of the gerrit.war download that we will execute. Defaults to
# container.war property in $GERRIT_SITE/etc/gerrit.config.
#
# NO_START
# If set to "1" disables Gerrit from starting.
#
# START_STOP_DAEMON
# If set to "0" disables using start-stop-daemon. This may need to
# be set on SuSE systems.
if test -f /lib/lsb/init-functions ; then
. /lib/lsb/init-functions
fi
usage() {
  # Print invocation help on stderr and abort with status 1.
  me=$(basename "$0")
  echo >&2 "Usage: $me {start|stop|restart|check|status|run|supervise|threads} [-d site]"
  exit 1
}
test $# -gt 0 || usage
##################################################
# Some utility functions
##################################################
running() {
  # Return 0 iff $1 is an existing pid file naming a live process.
  # Side effect (as before): leaves the pid in the global PID.
  [ -f "$1" ] || return 1
  PID=$(cat "$1")
  if ps ax -o pid | grep -w "$PID" >/dev/null 2>&1; then
    return 0
  fi
  return 1
}
thread_dump() {
  # Dump Java thread stacks for the process recorded in pid file $1,
  # using the jstack binary selected earlier ($JSTACK).
  [ -f "$1" ] || return 1
  PID=$(cat "$1")
  $JSTACK "$PID" || return 1
  return 0
}
# Read a value from $GERRIT_CONFIG via "git config".
# Usage: get_config [--int|--get|--get-all|--bool|...] <section.key>
# With --int, works around git sometimes returning 0 for sizes such as
# "8g": re-reads the raw string and expands the g/k suffix manually.
# Prints nothing when the config file does not exist.
get_config() {
if test -f "$GERRIT_CONFIG" ; then
if test "x$1" = x--int ; then
# Git might not be able to expand "8g" properly. If it gives
# us 0 back retry for the raw string and expand ourselves.
#
n=`git config --file "$GERRIT_CONFIG" --int "$2"`
if test x0 = "x$n" ; then
n=`git config --file "$GERRIT_CONFIG" --get "$2"`
case "$n" in
*g) n=`expr ${n%%g} \* 1024`m ;;
*k) n=`expr ${n%%k} \* 1024` ;;
*) : ;;
esac
fi
echo "$n"
else
# Any other flag (--get, --get-all, --bool, ...) is passed through.
git config --file "$GERRIT_CONFIG" $1 "$2"
fi
fi
}
# Required to determine the version of the gerrit.war file before
# attempting to start it.  Succeeds (returns 0) only when the war's
# reported version contains "RP" — the WANdisco replicated marker.
check_is_replicated_war() {
  # Quote the path so war files in directories with spaces work.
  version=$(check_war_version "$1")
  if [[ $version == *RP* ]]; then
    return 0
  else
    return 1
  fi
}
# Print the version reported by "java -jar <war> version" for the war
# file given as $1.  Leaves $version set globally (historical behavior).
check_war_version() {
  version=$($JAVA -jar "$1" version)
  # Intentional word-splitting: collapses multi-line/whitespace output
  # onto a single line.
  echo $version
}
##################################################
# Get the action and options
##################################################
# The first positional argument is the action verb; the rest are options.
ACTION=$1
shift
while test $# -gt 0 ; do
case "$1" in
# -d / --site-path with the value in the next argument.
-d|--site-path)
shift
GERRIT_SITE=$1
shift
;;
# -d=PATH form.
-d=*)
GERRIT_SITE=${1##-d=}
shift
;;
# --site-path=PATH form.
--site-path=*)
GERRIT_SITE=${1##--site-path=}
shift
;;
# --props=JVM_PROPS, later prepended to the java command line.
# NOTE(review): ${1//--props=} strips every occurrence of the prefix,
# not just a leading one — presumably only one ever appears; confirm.
--props=*)
JAVA_PROPS=${1//--props=}
shift
;;
*)
usage
esac
done
test -z "$NO_START" && NO_START=0
test -z "$START_STOP_DAEMON" && START_STOP_DAEMON=1
##################################################
# See if there's a default configuration file
##################################################
if test -f /etc/default/gerritcodereview ; then
. /etc/default/gerritcodereview
fi
##################################################
# Set tmp if not already set.
##################################################
if test -z "$TMP" ; then
TMP=/tmp
fi
TMPJ=$TMP/j$$
##################################################
# Reasonable guess marker for a Gerrit site path.
##################################################
GERRIT_INSTALL_TRACE_FILE=etc/gerrit.config
##################################################
# No git in PATH? Needed for gerrit.config parsing
##################################################
if type git >/dev/null 2>&1 ; then
: OK
else
echo >&2 "** ERROR: Cannot find git in PATH"
exit 1
fi
##################################################
# Try to determine GERRIT_SITE if not set
##################################################
if test -z "$GERRIT_SITE" ; then
GERRIT_SITE_1=`dirname "$0"`/..
if test -f "${GERRIT_SITE_1}/${GERRIT_INSTALL_TRACE_FILE}" ; then
GERRIT_SITE=${GERRIT_SITE_1}
fi
fi
##################################################
# No GERRIT_SITE yet? We're out of luck!
##################################################
if test -z "$GERRIT_SITE" ; then
echo >&2 "** ERROR: GERRIT_SITE not set"
exit 1
fi
INITIAL_DIR=`pwd`
if cd "$GERRIT_SITE" ; then
GERRIT_SITE=`pwd`
else
echo >&2 "** ERROR: Gerrit site $GERRIT_SITE not found"
exit 1
fi
#####################################################
# Check that Gerrit is where we think it is
#####################################################
GERRIT_CONFIG="$GERRIT_SITE/$GERRIT_INSTALL_TRACE_FILE"
test -f "$GERRIT_CONFIG" || {
echo "** ERROR: Gerrit is not initialized in $GERRIT_SITE"
exit 1
}
test -r "$GERRIT_CONFIG" || {
echo "** ERROR: $GERRIT_CONFIG is not readable!"
exit 1
}
GERRIT_PID="$GERRIT_SITE/logs/gerrit.pid"
GERRIT_RUN="$GERRIT_SITE/logs/gerrit.run"
GERRIT_TMP="$GERRIT_SITE/tmp"
export GERRIT_TMP
##################################################
# Check for JAVA_HOME
##################################################
JAVA_HOME_OLD="$JAVA_HOME"
JAVA_HOME=`get_config --get container.javaHome`
if test -z "$JAVA_HOME" ; then
JAVA_HOME="$JAVA_HOME_OLD"
fi
if test -z "$JAVA_HOME" ; then
# If a java runtime is not defined, search the following
# directories for a JVM and sort by version. Use the highest
# version number.
JAVA_LOCATIONS="\
/usr/java \
/usr/bin \
/usr/local/bin \
/usr/local/java \
/usr/local/jdk \
/usr/local/jre \
/usr/lib/jvm \
/opt/java \
/opt/jdk \
/opt/jre \
"
for N in java jdk jre ; do
for L in $JAVA_LOCATIONS ; do
test -d "$L" || continue
find $L -name "$N" ! -type d | grep -v threads | while read J ; do
test -x "$J" || continue
VERSION=`eval "$J" -version 2>&1`
test $? = 0 || continue
VERSION=`expr "$VERSION" : '.*"\(1.[0-9\.]*\)["_]'`
test -z "$VERSION" && continue
expr "$VERSION" \< 1.2 >/dev/null && continue
echo "$VERSION:$J"
done
done
done | sort | tail -1 >"$TMPJ"
JAVA=`cat "$TMPJ" | cut -d: -f2`
JVERSION=`cat "$TMPJ" | cut -d: -f1`
rm -f "$TMPJ"
JAVA_HOME=`dirname "$JAVA"`
while test -n "$JAVA_HOME" \
-a "$JAVA_HOME" != "/" \
-a ! -f "$JAVA_HOME/lib/tools.jar" ; do
JAVA_HOME=`dirname "$JAVA_HOME"`
done
test -z "$JAVA_HOME" && JAVA_HOME=
echo "** INFO: Using $JAVA"
fi
if test -z "$JAVA" \
-a -n "$JAVA_HOME" \
-a -x "$JAVA_HOME/bin/java" \
-a ! -d "$JAVA_HOME/bin/java" ; then
JAVA="$JAVA_HOME/bin/java"
fi
if test -z "$JAVA" ; then
echo >&2 "Cannot find a JRE or JDK. Please ensure that the JAVA_HOME environment"
echo >&2 "variable or container.javaHome in $GERRIT_SITE/etc/gerrit.config is"
echo >&2 "set to a valid >=1.8 JRE location"
exit 1
fi
if test -z "$JSTACK"; then
JSTACK="$JAVA_HOME/bin/jstack"
fi
#####################################################
# Add Gerrit properties to Java VM options.
#####################################################
GERRIT_OPTIONS=`get_config --get-all container.javaOptions | tr '\n' ' ' | sed -e 's/log4j\.Log4jBackendFactory#getInstance/slf4j.Slf4jBackendFactory#getInstance/g'`
if test -n "$GERRIT_OPTIONS" ; then
JAVA_OPTIONS="$JAVA_OPTIONS $GERRIT_OPTIONS"
fi
GERRIT_MEMORY=`get_config --get container.heapLimit`
if test -n "$GERRIT_MEMORY" ; then
JAVA_OPTIONS="$JAVA_OPTIONS -Xmx$GERRIT_MEMORY"
fi
GERRIT_FDS=`get_config --int core.packedGitOpenFiles`
test -z "$GERRIT_FDS" && GERRIT_FDS=128
FDS_MULTIPLIER=2
USE_LFS=`get_config --get lfs.plugin`
test -n "$USE_LFS" && FDS_MULTIPLIER=3
GERRIT_FDS=`expr $FDS_MULTIPLIER \* $GERRIT_FDS`
test $GERRIT_FDS -lt 1024 && GERRIT_FDS=1024
GERRIT_STARTUP_TIMEOUT=`get_config --get container.startupTimeout`
test -z "$GERRIT_STARTUP_TIMEOUT" && GERRIT_STARTUP_TIMEOUT=90 # seconds
GERRIT_USER=`get_config --get container.user`
#####################################################
# Configure sane ulimits for a daemon of our size.
#####################################################
ulimit -c 0 ; # core file size
ulimit -d unlimited ; # data seg size
ulimit -f unlimited ; # file size
ulimit -m >/dev/null 2>&1 && ulimit -m unlimited ; # max memory size
ulimit -n $GERRIT_FDS ; # open files
ulimit -t unlimited ; # cpu time
ulimit -v unlimited ; # virtual memory
ulimit -x >/dev/null 2>&1 && ulimit -x unlimited ; # file locks
#####################################################
# This is how the Gerrit server will be started
#####################################################
if test -z "$GERRIT_WAR" ; then
GERRIT_WAR=`get_config --get container.war`
fi
if test -z "$GERRIT_WAR" ; then
GERRIT_WAR="$GERRIT_SITE/bin/gerrit.war"
test -f "$GERRIT_WAR" || GERRIT_WAR=
fi
if test -z "$GERRIT_WAR" -a -n "$GERRIT_USER" ; then
for homedirs in /home /Users ; do
if test -d "$homedirs/$GERRIT_USER" ; then
GERRIT_WAR="$homedirs/$GERRIT_USER/gerrit.war"
if test -f "$GERRIT_WAR" ; then
break
else
GERRIT_WAR=
fi
fi
done
fi
if test -z "$GERRIT_WAR" ; then
echo >&2 "** ERROR: Cannot find gerrit.war (try setting \$GERRIT_WAR)"
exit 1
fi
test -z "$GERRIT_USER" && GERRIT_USER=`whoami`
RUN_ARGS="-jar $GERRIT_WAR daemon -d $GERRIT_SITE"
if test "`get_config --bool container.slave`" = "true" ; then
RUN_ARGS="$RUN_ARGS --slave --enable-httpd --headless"
fi
DAEMON_OPTS=`get_config --get-all container.daemonOpt`
if test -n "$DAEMON_OPTS" ; then
RUN_ARGS="$RUN_ARGS $DAEMON_OPTS"
fi
if test -n "$JAVA_PROPS" ; then
RUN_ARGS="$JAVA_PROPS $RUN_ARGS"
fi
if test -n "$JAVA_OPTIONS" ; then
RUN_ARGS="$JAVA_OPTIONS $RUN_ARGS"
fi
if test -x /usr/bin/perl ; then
# If possible, use Perl to mask the name of the process so its
# something specific to us rather than the generic 'java' name.
#
export JAVA
RUN_EXEC=/usr/bin/perl
RUN_Arg1=-e
RUN_Arg2='$x=$ENV{JAVA};exec $x @ARGV;die $!'
RUN_Arg3='-- GerritCodeReview'
else
RUN_EXEC=$JAVA
RUN_Arg1=
RUN_Arg2='-DGerritCodeReview=1'
RUN_Arg3=
fi
##################################################
# Do the action
##################################################
case "$ACTION" in
start)
if ! check_is_replicated_war $GERRIT_WAR; then
echo "** ERROR: $GERRIT_WAR version [ $(check_war_version $GERRIT_WAR ) ] doesn't contain RP. It is not a WANdisco replicated version."
exit 1
fi
if [[ -z "$(get_config --get noteDb.changes.sequenceBatchSize)" ]] ; then
echo "** ERROR: noteDb.changes.sequenceBatchSize is not set. See WANdisco documentation."
exit 1
fi
printf '%s' "Starting Gerrit Code Review: "
if test 1 = "$NO_START" ; then
echo "Not starting gerrit - NO_START=1 in /etc/default/gerritcodereview"
exit 0
fi
test -z "$UID" && UID=`id | sed -e 's/^[^=]*=\([0-9]*\).*/\1/'`
RUN_ID=`date +%s`.$$
RUN_ARGS="$RUN_ARGS --run-id=$RUN_ID"
if test 1 = "$START_STOP_DAEMON" && type start-stop-daemon >/dev/null 2>&1
then
test $UID = 0 && CH_USER="-c $GERRIT_USER"
if start-stop-daemon -S -b $CH_USER \
-p "$GERRIT_PID" -m \
-d "$GERRIT_SITE" \
-a "$RUN_EXEC" -- $RUN_Arg1 "$RUN_Arg2" $RUN_Arg3 $RUN_ARGS
then
: OK
else
rc=$?
if test $rc = 127; then
echo >&2 "fatal: start-stop-daemon failed"
rc=1
fi
exit $rc
fi
else
if test -f "$GERRIT_PID" ; then
if running "$GERRIT_PID" ; then
echo "Already Running!!"
exit 0
else
rm -f "$GERRIT_PID" "$GERRIT_RUN"
fi
fi
if test $UID = 0 -a -n "$GERRIT_USER" ; then
touch "$GERRIT_PID"
chown $GERRIT_USER "$GERRIT_PID"
su - $GERRIT_USER -s /bin/sh -c "
JAVA='$JAVA' ; export JAVA ;
$RUN_EXEC $RUN_Arg1 '$RUN_Arg2' $RUN_Arg3 $RUN_ARGS </dev/null >/dev/null 2>&1 &
PID=\$! ;
disown ;
echo \$PID >\"$GERRIT_PID\""
else
$RUN_EXEC $RUN_Arg1 "$RUN_Arg2" $RUN_Arg3 $RUN_ARGS </dev/null >/dev/null 2>&1 &
PID=$!
type disown >/dev/null 2>&1 && disown
echo $PID >"$GERRIT_PID"
fi
fi
PID=`cat "$GERRIT_PID"`
if test $UID = 0; then
if test -f "/proc/${PID}/oom_score_adj" ; then
echo -1000 > "/proc/${PID}/oom_score_adj"
else
if test -f "/proc/${PID}/oom_adj" ; then
echo -16 > "/proc/${PID}/oom_adj"
fi
fi
elif [ "$(uname -s)"=="Linux" ] && test -d "/proc/${PID}"; then
echo "WARNING: Could not adjust Gerrit's process for the kernel's out-of-memory killer."
echo " This may be caused by ${0} not being run as root."
echo " Consider changing the OOM score adjustment manually for Gerrit's PID=${PID} with e.g.:"
echo " echo '-1000' | sudo tee /proc/${PID}/oom_score_adj"
fi
TIMEOUT="$GERRIT_STARTUP_TIMEOUT"
sleep 1
while running "$GERRIT_PID" && test $TIMEOUT -gt 0 ; do
if test "x$RUN_ID" = "x`cat $GERRIT_RUN 2>/dev/null`" ; then
echo OK
exit 0
fi
sleep 2
TIMEOUT=`expr $TIMEOUT - 2`
done
echo FAILED
exit 1
;;
stop)
printf '%s' "Stopping Gerrit Code Review: "
if test 1 = "$START_STOP_DAEMON" && type start-stop-daemon >/dev/null 2>&1
then
start-stop-daemon -K -p "$GERRIT_PID" -s HUP
sleep 1
if running "$GERRIT_PID" ; then
sleep 3
if running "$GERRIT_PID" ; then
sleep 30
if running "$GERRIT_PID" ; then
start-stop-daemon -K -p "$GERRIT_PID" -s KILL
fi
fi
fi
rm -f "$GERRIT_PID" "$GERRIT_RUN"
echo OK
else
PID=`cat "$GERRIT_PID" 2>/dev/null`
TIMEOUT=30
while running "$GERRIT_PID" && test $TIMEOUT -gt 0 ; do
kill $PID 2>/dev/null
sleep 1
TIMEOUT=`expr $TIMEOUT - 1`
done
test $TIMEOUT -gt 0 || kill -9 $PID 2>/dev/null
rm -f "$GERRIT_PID" "$GERRIT_RUN"
echo OK
fi
;;
restart)
GERRIT_SH=$0
if test -f "$GERRIT_SH" ; then
: OK
else
GERRIT_SH="$INITIAL_DIR/$GERRIT_SH"
if test -f "$GERRIT_SH" ; then
: OK
else
echo >&2 "** ERROR: Cannot locate gerrit.sh"
exit 1
fi
fi
$GERRIT_SH stop $*
sleep 5
$GERRIT_SH start $*
exit $?
;;
supervise)
#
# Under control of daemontools supervise monitor which
# handles restarts and shutdowns via the svc program.
#
exec "$RUN_EXEC" $RUN_Arg1 "$RUN_Arg2" $RUN_Arg3 $RUN_ARGS
;;
run|daemon)
echo "Running Gerrit Code Review:"
if test -f "$GERRIT_PID" ; then
if running "$GERRIT_PID" ; then
echo "Already Running!!"
exit 0
else
rm -f "$GERRIT_PID"
fi
fi
exec "$RUN_EXEC" $RUN_Arg1 "$RUN_Arg2" $RUN_Arg3 $RUN_ARGS --console-log
;;
check|status)
echo "Checking arguments to Gerrit Code Review:"
echo " GERRIT_SITE = $GERRIT_SITE"
echo " GERRIT_CONFIG = $GERRIT_CONFIG"
echo " GERRIT_PID = $GERRIT_PID"
echo " GERRIT_TMP = $GERRIT_TMP"
echo " GERRIT_WAR = $GERRIT_WAR"
echo " GERRIT_FDS = $GERRIT_FDS"
echo " GERRIT_USER = $GERRIT_USER"
echo " GERRIT_STARTUP_TIMEOUT = $GERRIT_STARTUP_TIMEOUT"
echo " JAVA = $JAVA"
echo " JAVA_OPTIONS = $JAVA_OPTIONS"
echo " RUN_EXEC = $RUN_EXEC $RUN_Arg1 '$RUN_Arg2' $RUN_Arg3"
echo " RUN_ARGS = $RUN_ARGS"
echo
if test -f "$GERRIT_PID" ; then
if running "$GERRIT_PID" ; then
echo "Gerrit running pid="`cat "$GERRIT_PID"`
exit 0
fi
fi
exit 3
;;
threads)
if running "$GERRIT_PID" ; then
thread_dump "$GERRIT_PID"
exit 0
else
echo "Gerrit not running?"
fi
exit 3
;;
*)
usage
;;
esac
exit 0
|
WANdisco/gerrit
|
resources/com/google/gerrit/pgm/init/gerrit.sh
|
Shell
|
apache-2.0
| 17,080 |
# Habitat build plan for sshpass (non-interactive ssh password helper).
pkg_name=sshpass
pkg_origin=core
# Upstream release version.
pkg_version="1.06"
pkg_maintainer="The Habitat Maintainers <[email protected]>"
pkg_license=("GPL-2.0-or-later")
pkg_filename="${pkg_name}-${pkg_version}.tar.gz"
pkg_source="https://downloads.sourceforge.net/project/${pkg_name}/${pkg_name}/${pkg_version}/${pkg_filename}"
# SHA256 of the source tarball, verified by Habitat at download time.
pkg_shasum="c6324fcee608b99a58f9870157dfa754837f8c48be3df0f5e2f3accf145dee60"
pkg_build_deps=(lilian/make lilian/gcc)
pkg_bin_dirs=(bin)
pkg_description="Non-interactive ssh password auth"
pkg_upstream_url="https://sourceforge.net/projects/sshpass/"
|
be-plans/be
|
sshpass/plan.sh
|
Shell
|
apache-2.0
| 555 |
#!/bin/bash
# Provision SSH access for the "vagrant" user with Vagrant's well-known
# insecure public key (the standard bootstrap for Vagrant base boxes),
# and grant the user passwordless sudo.
if [ ! -d /home/vagrant/.ssh ]; then
mkdir -p /home/vagrant/.ssh
chown vagrant:vagrant /home/vagrant/.ssh
fi
if [ ! -f /home/vagrant/.ssh/authorized_keys ]; then
# Fetch the public half of Vagrant's default keypair; abort on failure.
wget --no-check-certificate https://raw.github.com/mitchellh/vagrant/master/keys/vagrant.pub -O /home/vagrant/.ssh/authorized_keys || exit 1
chown vagrant:vagrant /home/vagrant/.ssh/authorized_keys || exit 1
fi
# Allow passwordless sudo for the vagrant user.
echo "vagrant ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
|
Sher-Chowdhury/vagrant-openldap
|
scripts/set-private-keys.sh
|
Shell
|
apache-2.0
| 448 |
#!/usr/bin/env bash
cd ..
git pull origin master
mvn clean install -Dmaven.test.skip=true
mvn package -Dmaven.test.skip=true
#docker-compose -f ./eureka/docker/docker-compose.yml up -d --build --force-recreate;
docker-compose -f ./Api/docker/docker-compose.yml up -d --build --force-recreate;
docker-compose -f ./OrderService/docker/docker-compose.yml up -d --build --force-recreate;
docker-compose -f ./CommunityService/docker/docker-compose.yml up -d --build --force-recreate;
docker-compose -f ./StoreService/docker/docker-compose.yml up -d --build --force-recreate;
docker-compose -f ./UserService/docker/docker-compose.yml up -d --build --force-recreate;
docker-compose -f ./FrontService/docker/docker-compose.yml up -d --build --force-recreate;
docker-compose -f ./FeeService/docker/docker-compose.yml up -d --build --force-recreate;
docker-compose -f ./CommonService/docker/docker-compose.yml up -d --build --force-recreate;
|
java110/MicroCommunity
|
docs/bin/reBuildAll.sh
|
Shell
|
apache-2.0
| 990 |
#!/bin/bash
source ~/.profile
source ~/.rvm/scripts/rvm
rvm --version
# rvm install ruby-1.8.7
# rvm use ruby-1.8.7
rvm install ruby-1.9.3
rvm use ruby-1.9.3
# rvm install ruby-2.0.0
# rvm use ruby-2.0.0
set -e
ruby -v
echo "gem version"
gem --version
gem install bundler --no-rdoc --no-ri
bundle install --without development
bundle --version
gem update --system 2.1.11
bundle exec rake syntax
bundle exec rake lint
bundle exec rake ci:setup:rspec spec
bundle exec rubocop
#bundle exec rake spec
#ruby syntax check
rubocop
# for windows
# NOTE(review): the lines below are manual Windows setup notes, not
# executable shell.  They were previously un-commented, so under the
# script's "set -e" the first of them (coreutils `install` invoked with
# bogus arguments) always failed and made the script exit non-zero even
# when every test target above had succeeded.  Kept verbatim as comments.
# install ruby 1.9.3 for windows
# install puppet 3.7.1 msi (https://downloads.puppetlabs.com/windows/)
# gem install bundler --no-rdoc --no-ri
# bundle install --without development
# gem update --system 2.1.11
# set PUPPET_VERSION=3.7.1
# bundle update
# SET PATH="C:\Program Files (x86)\Puppet Labs\Puppet\bin";"C:\Program Files (x86)\Puppet Labs\Puppet\facter\bin";"C:\Program Files (x86)\Puppet Labs\Puppet\hiera\bin";"C:\Program Files (x86)\Puppet Labs\Puppet\bin";"C:\Program Files (x86)\Puppet Labs\Puppet\sys\tools\bin";%PATH%
# SET RUBYLIB="C:\Program Files (x86)\Puppet Labs\Puppet\lib";"C:\Program Files (x86)\Puppet Labs\Puppet\facter\lib";"C:\Program Files (x86)\Puppet Labs\Puppet\hiera\lib";%RUBYLIB%;
# bundle exec rake syntax
# bundle exec rake lint
# bundle exec rake ci:setup:rspec spec
|
biemond/vagrant-puppetmaster
|
puppet/modules/orawls/test.sh
|
Shell
|
apache-2.0
| 1,333 |
#!/usr/bin/expect
set timeout 30
if { [llength $argv] < 1} {
puts "Usage:"
puts "$argv0 ip"
exit 1
}
spawn ssh banmo.ch@[lindex $argv 0]
expect {
"*assword:*" {
set timeout 1000
send "xxxxxxxxxx\r"
#exp_continue
}
"*es/no)?*" {
send "yes\r"
exp_continue
}
timeout {
puts "connect is timeout"
exit 3
}
}
send "cd /home/admin/[lindex $argv 1]/logs/\n"
send "tail -f -n 100 [lindex $argv 1].log\n"
interact
|
ch710798472/MyTools
|
script/mac/ssh.sh
|
Shell
|
apache-2.0
| 535 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# Some vrange nastyness that has been broken in the past
B=`basename $0 .sh`
cd `dirname $0`
source ./rungraphd
rm -rf $D
# Note the embedded tabs in request 4 and 6.
rungraphd -d${D} -bty <<-EOF
write id="1" (value="mw1806_propval_bcdefghi")
read id="2" (value<="mw1806_propval_bcdefghi" result=((guid value)))
read id="3" (value>="mw1806_propval_bcdefghi" result=((guid value)))
write id="4" (comparator="octet" value="1729715 Township of Nutley Civil NJ 34 Essex 013 404900N 0740929W 40.8167669 -74.1579217 18 Orange" )
read id="5" (value="1729715 Township of Nutley Civil NJ 34 Essex 013 404900N 0740929W 40.8167669 -74.1579217 18 Orange")
read (comparator="octet" value="1729715 Township of Nutley Civil NJ 34 Essex 013 404900N 0740929W 40.8167669 -74.1579217 18 Orange" )
write id="6" (value="")
write id="7" ()
read id="8" (value>"sam")
write id="9" (value=" x ")
read id="10" (value="x")
write id="11" ( value="x y")
read id="12" (value="x")
write id="13" ( value=" a b c ")
read id="14" (value="a b c ")
read id="15" (value="abc")
EOF
rm -rf $D
|
googlearchive/graphd
|
test/unit/david_5.sh
|
Shell
|
apache-2.0
| 1,736 |
#!/bin/bash
# Build the maejs binary with g++ 4.9 and run it over the sample inputs.
set -e
# Log the working directory (the paths below are relative to it).
pwd
# Invoke the compiler directly: the previous `echo \`g++ ...\`` form
# discarded the compiler's exit status and mangled its diagnostics.
g++-4.9 -o ../build/maejs ../convert.cpp -static-libgcc -static-libstdc++ -static
output="$(../build/maejs trash.aes memes.aes)"
echo "$output"
waffleston/modulaether-script
|
test/test.sh
|
Shell
|
apache-2.0
| 176 |
#!/bin/bash
# Ask the user for a Linux distribution name and print a remark about it.
printf 'Which Linux distribution do you know? '
read DISTR
case "$DISTR" in
  ubuntu)
    echo "I know it! It is an operating system based on Debian."
    ;;
  centos | rhel)
    echo "Hey! It is my favorite Server OS!"
    ;;
  windows)
    echo "Very funny..."
    ;;
  *)
    echo "Hmm, seems i've never used it."
    ;;
esac
|
becloudready/devopstraining
|
bash/conditions/case.sh
|
Shell
|
apache-2.0
| 398 |
#!/bin/bash
set -ex
export KOKORO_GITHUB_DIR=${KOKORO_ROOT}/src/github
source ${KOKORO_GFILE_DIR}/kokoro/common.sh
gcloud -q components update beta
cd ${KOKORO_GITHUB_DIR}/php-docker
export TAG=`date +%Y-%m-%d-%H-%M`
cp "${PHP_DOCKER_GOOGLE_CREDENTIALS}" \
./service_account.json
# For nightly build
if [ "${GOOGLE_PROJECT_ID}" = "php-mvm-a-28051" ]; then
gcloud config set project php-mvm-a-28051
fi
scripts/run_acceptance_tests.sh
|
GoogleCloudPlatform/php-docker
|
scripts/acceptance.sh
|
Shell
|
apache-2.0
| 447 |
#!/bin/sh
###
### Job name
#SBATCH --job-name=THA_bpdn_uniform_q3
### Time limit of total run time of the job
#SBATCH --time=500:00:00
### Number of nodes required for the job
#SBATCH --nodes=1
### Number of processor cores
#SBATCH -n 8
#SBATCH --sockets-per-node=2
#SBATCH --cores-per-socket=4
### Amount of memory per core
#SBATCH --mem=24000
### Output
#SBATCH -o THA_bpdn_uniform_q3_out.%J
#SBATCH -e THA_bpdn_uniform_q3_err.%J
### The actual job execution export
cd ~/public-repos/corr-cs-code-repo/code/bpdn/
/pack/matlab/bin/matlab -nodisplay -r "addpath('~/matlab_toolboxes/spgl1-1.8/','~/public-repos/corr-cs-code-repo/code/');matlabpool open local 8;test_uniform_ord_vs_scale(3)"
|
ThomasA/cs-correlated-noise
|
code/bpdn/test_uniform_ord_vs_scale_q3.sh
|
Shell
|
apache-2.0
| 691 |
#!/bin/bash
#
# For testing installer
#
# Self-test for the installer framework's primitives (install_download,
# install_exec, install_info, install_err, ...).
# NOTE(review): those install_* helpers are defined by the framework that
# sources this file — not visible here; behavior inferred from usage.
# Arguments: $1 must be the literal string "arg1" (delivered through the
# installer's extra-args mechanism).
unit_test_main() {
local arg=${1:-** call with arg1 **}
if [[ $arg != arg1 ]]; then
install_err "$arg: install_extra_args failed"
fi
# install_download is expected to emit the fixture value for key "data1".
if ! install_download data1 | grep -s -q value1; then
install_err 'data1: install_download failed'
fi
local sentinel=value$RANDOM
# install_exec should record the executed command in the install log.
install_exec echo "$sentinel"
if ! grep -s -q "$sentinel" "$install_log_file"; then
install_err "$sentinel: install_exec failed"
fi
sentinel=value$RANDOM
# install_info must stay out of the log unless verbose mode is on...
install_info "$sentinel"
if grep -s -q "$sentinel" "$install_log_file"; then
install_err "$sentinel: install_info failed"
fi
sentinel=value$RANDOM
# ...and must appear in the log when install_verbose=1.
install_verbose=1 install_info "$sentinel"
if ! grep -s -q "$sentinel" "$install_log_file"; then
install_err "$sentinel: install_verbose=1 install_info failed"
fi
echo PASSED
# Exercise the download/eval pipeline end to end.
install_url radiasoft/download installers
install_tmp_dir
install_script_eval rpm-code/codes.sh
codes_download container-test
}
|
radiasoft/download
|
installers/unit-test/radiasoft-download.sh
|
Shell
|
apache-2.0
| 1,035 |
#!/bin/bash
# Generate test coverage statistics for Go packages.
#
# Works around the fact that `go test -coverprofile` currently does not work
# with multiple packages, see https://code.google.com/p/go/issues/detail?id=6909
#
# Usage: script/coverage.sh [--html]
#
# --html Additionally create HTML report and open it in browser
#
set -e
workdir=.cover
profile="$workdir/cover.out"
mode=count
# Run "go test" for every package given as an argument, writing one
# coverage profile per package under $workdir, then merge them into a
# single $profile file (workaround for `go test -coverprofile` not
# supporting multiple packages).
generate_cover_data() {
rm -rf "$workdir"
mkdir "$workdir"
for pkg in "$@"; do
# Derive a flat, filesystem-safe profile name from the package path.
f="$workdir/$(echo $pkg | tr / -).cover"
cover=""
# The repository root package is tested without instrumentation.
if ! [[ "$pkg" =~ ^github\.com\/prebid\/prebid\-server$ ]]; then
cover="-covermode=$mode -coverprofile=$f"
fi
go test ${cover} "$pkg"
done
# Merge: a single mode header followed by all per-package data lines.
echo "mode: $mode" >"$profile"
grep -h -v "^mode:" "$workdir"/*.cover >>"$profile"
}
show_cover_report() {
  # Render the merged profile with "go tool cover"; $1 selects the
  # output mode ("func" for a terminal summary, "html" for a browser).
  local report_mode=$1
  go tool cover "-${report_mode}=${profile}"
}
generate_cover_data $(go list ./... | grep -v /vendor/)
#show_cover_report func
case "$1" in
"")
;;
--html)
show_cover_report html ;;
*)
echo >&2 "error: invalid option: $1"; exit 1 ;;
esac
|
prebid/prebid-server
|
scripts/coverage.sh
|
Shell
|
apache-2.0
| 1,101 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generates updated api-reference docs from the latest swagger spec.
# Usage: ./update-api-reference-docs.sh <absolute output path>
set -o errexit
set -o nounset
set -o pipefail
echo "Note: This assumes that swagger spec has been updated. Please run hack/update-swagger-spec.sh to ensure that."
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
DEFAULT_OUTPUT="${KUBE_ROOT}/docs/api-reference"
OUTPUT=${1:-${DEFAULT_OUTPUT}}
# Use REPO_DIR if provided so we can set it to the host-resolvable path
# to the repo root if we are running this script from a container with
# docker mounted in as a volume.
# We pass the host output dir as the source dir to `docker run -v`, but use
# the regular one to compute diff (they will be the same if running this
# test on the host, potentially different if running in a container).
REPO_DIR=${REPO_DIR:-"${KUBE_ROOT}"}
TMP_SUBPATH="_output/generated_html"
OUTPUT_TMP_IN_HOST="${REPO_DIR}/${TMP_SUBPATH}"
OUTPUT_TMP="${KUBE_ROOT}/${TMP_SUBPATH}"
echo "Generating api reference docs at ${OUTPUT_TMP}"
DEFAULT_GROUP_VERSIONS="v1 extensions/v1beta1 batch/v1 autoscaling/v1 certificates/v1alpha1"
VERSIONS=${VERSIONS:-$DEFAULT_GROUP_VERSIONS}
for ver in $VERSIONS; do
mkdir -p "${OUTPUT_TMP}/${ver}"
done
SWAGGER_PATH="${REPO_DIR}/api/swagger-spec/"
echo "Reading swagger spec from: ${SWAGGER_PATH}"
user_flags="-u $(id -u)"
if [[ $(uname) == "Darwin" ]]; then
# mapping in a uid from OS X doesn't make any sense
user_flags=""
fi
for ver in $VERSIONS; do
TMP_IN_HOST="${OUTPUT_TMP_IN_HOST}/${ver}"
if [[ ${ver} == "v1" ]]; then
REGISTER_FILE="${REPO_DIR}/pkg/api/${ver}/register.go"
else
REGISTER_FILE="${REPO_DIR}/pkg/apis/${ver}/register.go"
fi
SWAGGER_JSON_NAME="$(kube::util::gv-to-swagger-name "${ver}")"
docker run ${user_flags} \
--rm -v "${TMP_IN_HOST}":/output:z \
-v "${SWAGGER_PATH}":/swagger-source:z \
-v "${REGISTER_FILE}":/register.go:z \
--net=host -e "https_proxy=${KUBERNETES_HTTPS_PROXY:-}" \
gcr.io/google_containers/gen-swagger-docs:v6 \
"${SWAGGER_JSON_NAME}"
done
# Check if we actually changed anything
pushd "${OUTPUT_TMP}" > /dev/null
touch .generated_html
find . -type f | cut -sd / -f 2- | LC_ALL=C sort > .generated_html
popd > /dev/null
while read file; do
if [[ -e "${OUTPUT}/${file}" && -e "${OUTPUT_TMP}/${file}" ]]; then
echo "comparing ${OUTPUT}/${file} with ${OUTPUT_TMP}/${file}"
# Filter all munges from original content.
original=$(cat "${OUTPUT}/${file}")
generated=$(cat "${OUTPUT_TMP}/${file}")
# Filter out meaningless lines with timestamps
original=$(echo "${original}" | grep -v "Last updated" || :)
generated=$(echo "${generated}" | grep -v "Last updated" || :)
# By now, the contents should be normalized and stripped of any
# auto-managed content.
if diff -B >/dev/null <(echo "${original}") <(echo "${generated}"); then
# actual contents same, overwrite generated with original.
cp "${OUTPUT}/${file}" "${OUTPUT_TMP}/${file}"
fi
fi
done <"${OUTPUT_TMP}/.generated_html"
echo "Moving api reference docs from ${OUTPUT_TMP} to ${OUTPUT}"
cp -af "${OUTPUT_TMP}"/* "${OUTPUT}"
rm -r "${OUTPUT_TMP}"
# ex: ts=2 sw=2 et filetype=sh
|
gtank/kubernetes
|
hack/update-api-reference-docs.sh
|
Shell
|
apache-2.0
| 3,910 |
#!/usr/bin/env bash
DOCKER=`/usr/bin/which docker` || { /bin/echo "docker not found in path... $?"; exit 1;}
usage() {
  # Show how to invoke this script.
  printf '%s\n' 'Usage: stop-cluster.sh'
}
${DOCKER} stop $(docker ps -a -q --filter="name=sfs_example")|| { echo "failed to stop containers ${DIRECTORY}. Exiting... $?"; exit 1;}
|
pitchpoint-solutions/sfs
|
sfs-example/stop-cluster.sh
|
Shell
|
apache-2.0
| 305 |
#!/bin/bash
# Repo ref: https://github.com/jhajek/itmo-544-444-env/blob/master/install-webserver.sh
# Provision an Apache/PHP web server and deploy the FP application into
# the web root, then install the AWS SDK via Composer and run DB setup.
sudo apt-get update -y
sudo apt-get install -y apache2 git php5 php5-mysql php5-curl mysql-client curl php5-imagick
sudo git clone https://github.com/Langenoir1878/FP.git
mv ./FP/images /var/www/html/images
mv ./FP/*.png /var/www/html
mv ./FP/*.js /var/www/html
mv ./FP/*.css /var/www/html
mv ./FP/*.php /var/www/html
mv ./FP/*.eot /var/www/html
mv ./FP/*.svg /var/www/html
mv ./FP/*.ttf /var/www/html
mv ./FP/*.woff /var/www/html
mv ./FP/*.woff2 /var/www/html
# Fixed: the next three previously targeted the relative path
# "var/www/html", moving the directories under the current working
# directory instead of the web root.
mv ./FP/css /var/www/html
mv ./FP/js /var/www/html
mv ./FP/fonts /var/www/html
curl -sS https://getcomposer.org/installer | sudo php &> /tmp/getcomposer.txt
sudo php composer.phar require aws/aws-sdk-php &> /tmp/runcomposer.txt
sudo mv vendor /var/www/html &> /tmp/movevendor.txt
sudo php /var/www/html/setup.php &> /tmp/database-setup.txt
echo "Hi, install-webserver.sh has been called!" > /tmp/hello.txt
|
Langenoir1878/FP
|
install-webserver.sh
|
Shell
|
artistic-2.0
| 982 |
#!/bin/sh
wget https://packagecontrol.io/Package%20Control.sublime-package -P ~/.config/sublime-text-3/Installed\ Packages/
|
gunzy83/dotfiles
|
dot_scripts/run_once_after_12_sublime_package_control.sh
|
Shell
|
bsd-2-clause
| 124 |
#!/bin/bash
set -e
VERSION=3.9.2-r1
SQLITE_VERSION=3.9.2
wget -O apsw.zip \
"https://github.com/rogerbinns/apsw/releases/download/${VERSION}/apsw-${VERSION}.zip"
rm -rf "apsw-${VERSION}"
unzip apsw.zip
cd "apsw-${VERSION}"
python setup.py fetch --version="${SQLITE_VERSION}" --missing-checksum-ok --all build --enable-all-extensions install
|
octobear2/ob2
|
build_apsw.sh
|
Shell
|
bsd-2-clause
| 349 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.