code (stringlengths 2-1.05M) | repo_name (stringlengths 5-110) | path (stringlengths 3-922) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 2-1.05M)
---|---|---|---|---|---
#!/bin/bash
# Copyright (c) 2012-2017 Red Hat, Inc
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
set -e
echo "[CHE] This script is going to wait until Postgres is deployed and available"
command -v oc >/dev/null 2>&1 || { echo >&2 "[CHE] [ERROR] Command line tool oc (https://docs.openshift.org/latest/cli_reference/get_started_cli.html) is required but it's not installed. Aborting."; exit 1; }
command -v jq >/dev/null 2>&1 || { echo >&2 "[CHE] [ERROR] Command line tool jq (https://stedolan.github.io/jq) is required but it's not installed. Aborting."; exit 1; }
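# Note: jq prints JSON strings with their surrounding quotes, so the
# comparisons below match the literal value "True" (quotes included);
# using 'jq -r' instead would strip the quotes and simplify the tests.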
available=$(oc get dc postgres -o json | jq '.status.conditions[] | select(.type == "Available") | .status')
progressing=$(oc get dc postgres -o json | jq '.status.conditions[] | select(.type == "Progressing") | .status')
DEPLOYMENT_TIMEOUT_SEC=1200
POLLING_INTERVAL_SEC=5
end=$((SECONDS+DEPLOYMENT_TIMEOUT_SEC))
while [[ "${available}" != "\"True\"" || "${progressing}" != "\"True\"" ]] && [ ${SECONDS} -lt ${end} ]; do
available=$(oc get dc postgres -o json | jq '.status.conditions[] | select(.type == "Available") | .status')
progressing=$(oc get dc postgres -o json | jq '.status.conditions[] | select(.type == "Progressing") | .status')
timeout_in=$((end-SECONDS))
echo "[CHE] Deployment is in progress...(Available.status=${available}, Progressing.status=${progressing}, Timeout in ${timeout_in}s)"
sleep ${POLLING_INTERVAL_SEC}
done
if [ "${progressing}" == "\"True\"" ]; then
echo "[CHE] Postgres deployed successfully"
elif [ "${progressing}" == "False" ]; then
echo "[CHE] [ERROR] Postgres deployment failed. Aborting. Run command 'oc rollout status postgres' to get more details."
exit 1
elif [ ${SECONDS} -ge ${end} ]; then
echo "[CHE] [ERROR] Deployment timeout. Aborting."
exit 1
fi
echo
echo
|
sleshchenko/che
|
dockerfiles/init/modules/openshift/files/scripts/multi-user/wait_until_postgres_is_available.sh
|
Shell
|
epl-1.0
| 1,999 |
#!/bin/bash
MUSER="$1"
MPASS="$2"
MDB="$3"
# Detect paths
MYSQL=$(which mysql)
AWK=$(which awk)
GREP=$(which grep)
if [ $# -ne 3 ]
then
echo "Usage: $0 {MySQL-User-Name} {MySQL-User-Password} {MySQL-Database-Name}"
echo "Drops all tables from a MySQL"
exit 1
fi
TABLES=$($MYSQL -u "$MUSER" -p"$MPASS" "$MDB" -h db416105782.db.1and1.com -e 'show tables' | $AWK '{ print $1}' | $GREP -v '^Tables' )
for t in $TABLES
do
echo "Deleting $t table from $MDB database..."
$MYSQL -u "$MUSER" -p"$MPASS" "$MDB" -h db416105782.db.1and1.com -e "drop table \`$t\`"
done
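# Possible refinement (untested sketch): tables referenced by foreign keys
# cannot always be dropped in arbitrary order, so the checks could be
# disabled around each drop, e.g.:
#   $MYSQL -u "$MUSER" -p"$MPASS" "$MDB" -h db416105782.db.1and1.com \
#     -e "SET FOREIGN_KEY_CHECKS=0; DROP TABLE \`$t\`"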
|
alexinfopruna/hipicmedia2013
|
backups/droptables.sh
|
Shell
|
gpl-2.0
| 578 |
#!/bin/sh
#
# $Id$
#
# This file is part of the OpenLink Software Virtuoso Open-Source (VOS)
# project.
#
# Copyright (C) 1998-2012 OpenLink Software
#
# This project is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; only version 2 of the License, dated June 1991.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
. ./test_fn.sh
DSN=$PORT
#PARAMETERS FOR HTTP TEST
USERS=10
nreq=1
CLICKS=10
THOST=localhost
TPORT=$HTTPPORT
HTTPPORT1=`expr $HTTPPORT + 1`
HTTPPORT2=`expr $HTTPPORT + 2`
#SERVER=M2 # OVERRIDE
LOGFILE=`pwd`/thttp.output
export LOGFILE
. ./test_fn.sh
DSN=$PORT
GenURIall ()
{
ECHO "Creating uri file for VSPX test"
file=allVSPX.uri
echo "$THOST $TPORT" > $file
for vspxfile in `grep -l brief *.vspx` ; do
echo "$CLICKS GET /$vspxfile HTTP/1.0" >> $file
echo "$CLICKS GET /$vspxfile HTTP/1.1" >> $file
done
chmod 644 $file
}
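# httpGet: replay a .uri file through ../urlsimu; a second argument > 0
# enables HTTP pipelining (-P) with that many requests per connection, and
# the third/fourth arguments override the default dba/dba credentials.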
httpGet ()
{
file=$1
if [ "$2" -gt "0" ]
then
pipeline="-P -c $2"
else
pipeline=""
fi
user=${3-dba}
pass=${4-dba}
../urlsimu $file $pipeline -u $user -p $pass
}
waitAll ()
{
clients=1
while [ "$clients" -gt "0" ]
do
sleep 1
clients=`ps -e | grep urlsimu | grep -v grep | wc -l`
# echo -e "Running clients $clients\r"
done
}
checkRes ()
{
result=0
result=`grep '200 OK' $1 | wc -l`
if [ "$result" -eq "$2" ]
then
ECHO "PASSED: $3 $result clicks"
else
ECHO "*** FAILED: $3 $result clicks, $2 expected."
fi
}
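# checkHTTPLog: the expected line count is CLICKS requests per page, over
# every *.vspx file containing the marker 'brief', once for HTTP/1.0 and
# once for HTTP/1.1 (hence the factor 2), for each of the USERS clients.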
checkHTTPLog ()
{
log_lines=0
log_lines=`grep '["]GET' vspx/http*.log | wc -l`
temp=`grep -l brief vspx/*.vspx | wc -l`
expected_log_lines=`expr $CLICKS \* $temp \* 2 \* $USERS`
log_lines=`expr $log_lines`
if [ "$log_lines" -eq "$expected_log_lines" ]
then
ECHO "PASSED: HTTP Log test"
else
ECHO "*** FAILED: HTTP Log test, $expected_log_lines expected $log_lines actual"
fi
}
# SQL command
DoCommand()
{
_dsn=$1
command=$2
shift
shift
echo "+ " $ISQL $_dsn dba dba ERRORS=STDOUT VERBOSE=OFF PROMPT=OFF "EXEC=$command" $* >> $LOGFILE
$ISQL $_dsn dba dba ERRORS=STDOUT VERBOSE=OFF PROMPT=OFF "EXEC=$command" $* >> $LOGFILE
if test $? -ne 0
then
LOG "***FAILED: $command"
else
LOG "PASSED: $command"
fi
}
# For CASE MODE 2 until all VSPX code is managed to run under 1
MakeConfig ()
{
#echo "CREATING CONFIGURATION FOR SERVER '$1'"
case $SERVER in
*[Mm2]*)
file=wi.cfg
cat > $file <<END_CFG
database_file: wi.db
log_file: wi.trx
number_of_buffers: 2000
max_dirty_buffers: 1200
max_checkpoint_remap: 20000
file_extend: 200
threads: 100
atomic_dive: 1
case_mode: 2
db_name: vspxtest
replication_server: 1
replication_queue: 1000000
autocheckpoint: 10
scheduler_period: 0
dirs_allowed: /, c:\\, d:\\, e:\\
sql_optimizer: $SQLOPTIMIZE
pl_debug: $PLDBG
test_coverage: cov.xml
SQL_ROWSET_SIZE: 100
SQL_PREFETCH_BYTES: 12000
callstack_on_exception: 1
HTTPLogFile: http.log
http_port: $HTTPPORT
http_threads: 3
http_keep_alive_timeout: 15
http_max_keep_alives: 6
http_max_cached_proxy_connections: 10
http_proxy_connection_cache_timeout: 15
END_CFG
;;
*virtuoso*)
file=virtuoso.ini
cat > $file <<END_CFG
[Database]
DatabaseFile = virtuoso.db
TransactionFile = virtuoso.trx
ErrorLogFile = virtuoso.log
ErrorLogLevel = 7
FileExtend = 200
Striping = 0
Syslog = 0
;
; Server parameters
;
[Parameters]
ServerPort = $PORT
ServerThreads = 100
CheckpointInterval = 60
NumberOfBuffers = 2000
MaxDirtyBuffers = 1200
MaxCheckpointRemap = 20000
UnremapQuota = 0
AtomicDive = 1
PrefixResultNames = 0
CaseMode = 2
DisableMtWrite = 0
SchedulerInterval = 0
DirsAllowed = /, c:\\, d:\\, e:\\
PLDebug = $PLDBG
TestCoverage = cov.xml
SQLOptimizer = $SQLOPTIMIZE
[HTTPServer]
HTTPLogFile = http.log
ServerPort = $HTTPPORT
ServerRoot = .
ServerThreads = 3
MaxKeepAlives = 6
KeepAliveTimeout = 15
MaxCachedProxyConnections = 10
ProxyConnectionCacheTimeout = 15
CallstackOnException = 1
[Client]
SQL_ROWSET_SIZE = 100
SQL_PREFETCH_BYTES = 12000
[Replication]
ServerName = vspxtest
ServerEnable = 1
QueueMax = 1000000
END_CFG
;;
esac
chmod 644 $file
}
MakeIni ()
{
MAKECFG_FILE ../$TESTCFGFILE $PORT $CFGFILE
case $SERVER in
*[Mm]2*)
cat >> $CFGFILE <<END_HTTP
callstack_on_exception: 1
HTTPLogFile: http.log
http_port: $HTTPPORT
http_threads: 3
http_keep_alive_timeout: 15
http_max_keep_alives: 6
http_max_cached_proxy_connections: 10
http_proxy_connection_cache_timeout: 15
END_HTTP
;;
*virtuoso*)
MAKECFG_FILE ../$TESTCFGFILE $PORT $CFGFILE
cat >> $CFGFILE <<END_HTTP1
[HTTPServer]
HTTPLogFile = http.log
ServerPort = $HTTPPORT
ServerRoot = .
ServerThreads = 3
MaxKeepAlives = 6
KeepAliveTimeout = 15
MaxCachedProxyConnections = 10
ProxyConnectionCacheTimeout = 15
CallstackOnException = 1
END_HTTP1
;;
esac
}
BANNER "STARTED SERIES OF VSPX TESTS"
NOLITE
ECHO "VSPX Server test ($CLICKS per page)"
#CLEANUP
STOP_SERVER
rm -f $DBLOGFILE $DBFILE
rm -rf vspx
mkdir vspx
cd vspx
cp -f $HOME/binsrc/vspx/examples/*.vspx .
cp -f $HOME/binsrc/vspx/examples/*.xml .
cp -f $HOME/binsrc/vspx/examples/*.xsl .
cp -f $HOME/binsrc/vspx/vspx_demo_init.sql .
# code file for code behind example
cat >> code_file__0.sql <<END_COD
drop type my_page_subclass
;
create type my_page_subclass under DB.dba.page__code__file____0_vspx
temporary self as ref
overriding method vc_post_b1 (control vspx_button, e vspx_event) returns any,
method button_change (control vspx_button) returns any
;
create method vc_post_b1 (inout control vspx_button, inout e vspx_event) for my_page_subclass
{
if (not control.vc_focus) return;
dbg_vspx_control (control);
self.button_change (control);
return;
}
;
create method button_change (inout control vspx_button) for my_page_subclass
{
self.var1 := self.var1 + 1;
control.ufl_value := 'Activated';
}
;
END_COD
GenURIall
#MakeIni
MakeConfig
CHECK_PORT $TPORT
START_SERVER $DSN 1000
sleep 1
cd ..
DoCommand $DSN "DB.DBA.VHOST_DEFINE ('*ini*', '*ini*', '/', '/', 0, 0, NULL, NULL, NULL, NULL, 'dba', NULL, NULL, 0);"
RUN $ISQL $DSN PROMPT=OFF VERBOSE=OFF ERRORS=STDOUT < nwdemo.sql
if test $STATUS -ne 0
then
LOG "***ABORTED: tvspx.sh: loading northwind data"
exit 3
fi
RUN $ISQL $DSN PROMPT=OFF VERBOSE=OFF ERRORS=STDOUT < vspx/vspx_demo_init.sql
if test $STATUS -ne 0
then
LOG "***ABORTED: tvspx.sh: loading vspx_demo_init.sql"
exit 3
fi
if [ "x$HOST_OS" = "x" -a "x$NO_PERF" = "x" ] ; then
ECHO "STARTED: test with $USERS clients"
count=1
while [ "$count" -le "$USERS" ] ; do
httpGet vspx/allVSPX.uri 0 > vspx/allVSPXres.$count &
count=`expr $count + 1`
done
waitAll
temp=`grep -l brief vspx/*.vspx | wc -l`
expected_OK_lines=`expr $CLICKS \* $temp \* 2 \* $USERS`
checkRes 'vspx/allVSPXres.*' $expected_OK_lines 'VSPX test'
checkHTTPLog
fi
SHUTDOWN_SERVER
CHECK_LOG
BANNER "COMPLETED SERIES OF VSPX TESTS"
|
trueg/virtuoso-opensource
|
binsrc/tests/suite/tvspx.sh
|
Shell
|
gpl-2.0
| 7,492 |
#!/bin/sh
# Copyright (C) 2009-2013 OpenWrt.org
. /lib/functions/leds.sh
. /lib/ar71xx.sh
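# get_status_led maps the ar71xx board name to the sysfs name of its status
# LED; boards without a matching entry simply leave $status_led unset.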
get_status_led() {
case $(ar71xx_board_name) in
alfa-nx)
status_led="alfa:green:led_8"
;;
all0305)
status_led="eap7660d:green:ds4"
;;
ap132)
status_led="ap132:green:status"
;;
ap136-010|\
ap136-020)
status_led="ap136:green:status"
;;
ap135-020)
status_led="ap135:green:status"
;;
ap81)
status_led="ap81:green:status"
;;
ap83)
status_led="ap83:green:power"
;;
ap96)
status_led="ap96:green:led2"
;;
aw-nr580)
status_led="aw-nr580:green:ready"
;;
bullet-m | rocket-m | nano-m | nanostation-m | nanostation-m-xw | loco-m-xw)
status_led="ubnt:green:link4"
;;
bxu2000n-2-a1)
status_led="bhu:green:status"
;;
cap4200ag)
status_led="senao:green:pwr"
;;
cpe510)
status_led="tp-link:green:link4"
;;
db120)
status_led="db120:green:status"
;;
dgl-5500-a1 |\
dhp-1565-a1|\
dir-505-a1 |\
dir-600-a1 |\
dir-615-e1 |\
dir-615-e4)
status_led="d-link:green:power"
;;
dir-615-c1)
status_led="d-link:green:status"
;;
dir-825-b1)
status_led="d-link:orange:power"
;;
dir-825-c1 |\
dir-835-a1)
status_led="d-link:amber:power"
;;
dragino2)
status_led="dragino2:red:system"
;;
eap300v2)
status_led="engenius:blue:power"
;;
eap7660d)
status_led="eap7660d:green:ds4"
;;
el-mini | \
el-m150)
status_led="easylink:green:system"
;;
f9k1115v2)
status_led="belkin:blue:status"
;;
gl-inet)
status_led="gl-connect:green:lan"
;;
esr1750)
status_led="esr1750:amber:power"
;;
esr900)
status_led="engenius:amber:power"
;;
hiwifi-hc6361)
status_led="hiwifi:blue:system"
;;
hornet-ub)
status_led="alfa:blue:wps"
;;
ja76pf | \
ja76pf2)
status_led="jjplus:green:led1"
;;
ls-sr71)
status_led="ubnt:green:d22"
;;
mc-mac1200r)
status_led="mercury:green:system"
;;
mr600)
status_led="mr600:orange:power"
;;
mr600v2)
status_led="mr600:blue:power"
;;
mr900 | \
mr900v2)
status_led="mr900:blue:power"
;;
mynet-n600 | \
mynet-n750)
status_led="wd:blue:power"
;;
qihoo-c301)
status_led="360:green:status"
;;
mynet-rext)
status_led="wd:blue:power"
;;
mzk-w04nu | \
mzk-w300nh)
status_led="planex:green:status"
;;
nbg460n_550n_550nh)
status_led="nbg460n:green:power"
;;
nbg6716)
status_led="zyxel:white:power"
;;
om2p | \
om2pv2 | \
om2p-hs | \
om2p-hsv2 | \
om2p-lc)
status_led="om2p:blue:power"
;;
om5p | \
om5p-an)
status_led="om5p:blue:power"
;;
pb44)
status_led="pb44:amber:jump1"
;;
rb-2011l|\
rb-2011uas|\
rb-2011uas-2hnd)
status_led="rb:green:usr"
;;
rb-411 | rb-411u | rb-433 | rb-433u | rb-450 | rb-450g | rb-493)
status_led="rb4xx:yellow:user"
;;
rb-750)
status_led="rb750:green:act"
;;
rb-911g-2hpnd|\
rb-911g-5hpnd|\
rb-912uag-2hpnd|\
rb-912uag-5hpnd)
status_led="rb:green:user"
;;
rb-951ui-2hnd)
status_led="rb:green:act"
;;
rb-sxt2n|\
rb-sxt5n)
status_led="rb:green:power"
;;
routerstation | routerstation-pro)
status_led="ubnt:green:rf"
;;
rw2458n)
status_led="rw2458n:green:d3"
;;
smart-300)
status_led="nc-link:green:system"
;;
oolite)
status_led="oolite:red:system"
;;
tew-632brp)
status_led="tew-632brp:green:status"
;;
tew-673gru)
status_led="trendnet:blue:wps"
;;
tew-712br|\
tew-732br)
status_led="trendnet:green:power"
;;
tl-mr3020)
status_led="tp-link:green:wps"
;;
tl-wa750re)
status_led="tp-link:orange:re"
;;
tl-wa850re)
status_led="tp-link:blue:re"
;;
tl-wa860re)
status_led="tp-link:green:power"
;;
tl-mr3220 | \
tl-mr3220-v2 | \
tl-mr3420 | \
tl-mr3420-v2 | \
tl-wa701nd-v2 | \
tl-wa801nd-v2 | \
tl-wa901nd | \
tl-wa901nd-v2 | \
tl-wa901nd-v3 | \
tl-wdr3320-v2 | \
tl-wdr3500 | \
tl-wdr6300 | \
tl-wr1041n-v2 | \
tl-wr1043nd | \
tl-wr1043nd-v2 | \
tl-wr741nd | \
tl-wr741nd-v4 | \
tl-wr841n-v1 | \
tl-wr841n-v7 | \
tl-wr841n-v8 | \
tl-wa830re-v2 | \
tl-wr842n-v2 | \
tl-wr941nd | \
tl-wr941nd-v7 | \
tl-wr941nd-v5)
status_led="tp-link:green:system"
;;
archer-c5 | \
archer-c7 | \
tl-wdr4900-v2 | \
tl-mr10u | \
tl-mr12u | \
tl-mr13u | \
mw4530r | \
tl-wdr4300 | \
tl-wr703n | \
tl-wr710n | \
tl-wr720n-v3)
status_led="tp-link:blue:system"
;;
tl-wr841n-v9)
status_led="tp-link:green:wlan"
;;
tl-wr2543n)
status_led="tp-link:green:wps"
;;
tl-wr882n-v1)
status_led="tp-link:white:status"
;;
tube2h)
status_led="alfa:green:signal4"
;;
unifi)
status_led="ubnt:green:dome"
;;
uap-pro)
status_led="ubnt:white:dome"
;;
unifi-outdoor-plus)
status_led="ubnt:white:front"
;;
airgateway)
status_led="ubnt:white:status"
;;
whr-g301n | \
whr-hp-g300n | \
whr-hp-gn | \
wzr-hp-g300nh)
status_led="buffalo:green:router"
;;
wlae-ag300n)
status_led="buffalo:green:status"
;;
wzr-hp-ag300h | \
wzr-hp-g300nh2)
status_led="buffalo:red:diag"
;;
r6100 | \
wndap360 | \
wndr3700 | \
wndr3700v4 | \
wndr4300 | \
wnr2000 | \
wnr2200 |\
wnr612-v2 |\
wnr1000-v2)
status_led="netgear:green:power"
;;
wp543)
status_led="wp543:green:diag"
;;
wpj558)
status_led="wpj558:green:sig3"
;;
wrt400n)
status_led="wrt400n:blue:wps"
;;
wrt160nl)
status_led="wrt160nl:blue:wps"
;;
zcn-1523h-2 | zcn-1523h-5)
status_led="zcn-1523h:amber:init"
;;
wlr8100)
status_led="sitecom:amber:status"
;;
esac
}
set_state() {
get_status_led
case "$1" in
preinit)
status_led_blink_preinit
;;
failsafe)
status_led_blink_failsafe
;;
preinit_regular)
status_led_blink_preinit_regular
;;
done)
status_led_on
case $(ar71xx_board_name) in
qihoo-c301)
if grep -q action_image_config /proc/mtd; then
local n=$(fw_printenv activeregion | cut -d = -f 2)
fw_setenv "image${n}trynum" 0
fi
;;
esac
;;
esac
}
|
okcom84301/openwrt
|
target/linux/ar71xx/base-files/etc/diag.sh
|
Shell
|
gpl-2.0
| 5,838 |
boinc init
cpu_time.exe 50 &
echo "hello world !"
boinc finish 0
|
atisu/genwrapper
|
apps/test/test_cpu_time.sh
|
Shell
|
gpl-2.0
| 71 |
#!/bin/bash
if [ -z "$DB_DIR" ]; then
echo "Need to set DB_DIR"
exit 1
fi
if [ ! -d "$DB_DIR" ]; then
echo "Need to create directory DB_DIR"
exit 1
fi
export TOKUDB_COMPRESSION=lzma
export TOKUDB_ROW_FORMAT=tokudb_${TOKUDB_COMPRESSION}
export NUM_TABLES=16
export NUM_DATABASES=1
export RUN_TIME_SECONDS=300
export RAND_TYPE=uniform
export NUM_ROWS=10000000
export TOKUDB_READ_BLOCK_SIZE=64K
export BENCHMARK_NUMBER=004
export MYSQL_STORAGE_ENGINE=tokudb
export TOKUDB_CACHE=8GB
export INNODB_CACHE=8GB
export LOADER_LOGGING=Y
export BENCHMARK_LOGGING=Y
export READONLY=off
export MYSQL_DATABASE=sbtest
export MYSQL_USER=root
export threadCountList="0064 0128"
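# Note: mkdb-quiet, mstart and mstop, plus the run.load.flatfiles.sh and
# run.benchmark.sh helpers used below, are assumed to come from the
# surrounding benchmark harness (reachable via PATH or the working tree).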
# *******************************************************************************************
# MARIADB 10.0.8
# *******************************************************************************************
export MYSQL_NAME=mariadb
export MYSQL_VERSION=10.0.8
export TARBALL=blank-mariadb-1008
export BENCH_ID=mariadb1008theirs.${TOKUDB_COMPRESSION}.10mm.${RAND_TYPE}
echo "Creating database from ${TARBALL} in ${DB_DIR}"
pushd $DB_DIR
mkdb-quiet $TARBALL
popd
echo "Configuring my.cnf and starting database"
pushd $DB_DIR
if [ ${MYSQL_STORAGE_ENGINE} == "innodb" ]; then
echo "innodb_buffer_pool_size=${INNODB_CACHE}" >> my.cnf
else
echo "tokudb_read_block_size=${TOKUDB_READ_BLOCK_SIZE}" >> my.cnf
echo "tokudb_row_format=${TOKUDB_ROW_FORMAT}" >> my.cnf
echo "tokudb_cache_size=${TOKUDB_CACHE}" >> my.cnf
echo "tokudb_directio=1" >> my.cnf
fi
echo "max_connections=2048" >> my.cnf
echo "performance_schema=OFF" >> my.cnf
mstart
popd
echo "Loading Data"
pushd fastload
./run.load.flatfiles.sh
popd
echo "Running benchmark"
./run.benchmark.sh
echo "Stopping database"
mstop
# *******************************************************************************************
# MARIADB 10.0.8
# *******************************************************************************************
export MYSQL_NAME=mariadb
export MYSQL_VERSION=10.0.8
export TARBALL=blank-mariadb-1008-rp-optimized
export BENCH_ID=mariadb1008rpopt.${TOKUDB_COMPRESSION}.10mm.${RAND_TYPE}
echo "Creating database from ${TARBALL} in ${DB_DIR}"
pushd $DB_DIR
mkdb-quiet $TARBALL
popd
echo "Configuring my.cnf and starting database"
pushd $DB_DIR
if [ ${MYSQL_STORAGE_ENGINE} == "innodb" ]; then
echo "innodb_buffer_pool_size=${INNODB_CACHE}" >> my.cnf
else
echo "tokudb_read_block_size=${TOKUDB_READ_BLOCK_SIZE}" >> my.cnf
echo "tokudb_row_format=${TOKUDB_ROW_FORMAT}" >> my.cnf
echo "tokudb_cache_size=${TOKUDB_CACHE}" >> my.cnf
echo "tokudb_directio=1" >> my.cnf
fi
echo "max_connections=2048" >> my.cnf
echo "performance_schema=OFF" >> my.cnf
mstart
popd
echo "Loading Data"
pushd fastload
./run.load.flatfiles.sh
popd
echo "Running benchmark"
./run.benchmark.sh
echo "Stopping database"
mstop
|
Percona-QA/toku-qa
|
tokudb/software/sysbench/doit-mariadb-performance-rp.bash
|
Shell
|
gpl-2.0
| 2,907 |
#!/bin/sh
set -e # abort on the first missing artifact so the check actually fails
## check exporters
ls -la /usr/local/percona/pmm2/exporters | grep -q azure_exporter
ls -la /usr/local/percona/pmm2/exporters | grep -q mongodb_exporter
ls -la /usr/local/percona/pmm2/exporters | grep -q mysqld_exporter
ls -la /usr/local/percona/pmm2/exporters | grep -q node_exporter
ls -la /usr/local/percona/pmm2/exporters | grep -q postgres_exporter
ls -la /usr/local/percona/pmm2/exporters | grep -q proxysql_exporter
ls -la /usr/local/percona/pmm2/exporters | grep -q rds_exporter
ls -la /usr/local/percona/pmm2/exporters | grep -q vmagent
## check tools binary
ls -la /usr/local/percona/pmm2/tools | grep -q pt-mongodb-summary
ls -la /usr/local/percona/pmm2/tools | grep -q pt-mysql-summary
ls -la /usr/local/percona/pmm2/tools | grep -q pt-pg-summary
ls -la /usr/local/percona/pmm2/tools | grep -q pt-summary
## check custom query files
ls -la /usr/local/percona/pmm2/collectors/custom-queries/mysql/medium-resolution | grep -q "queries-mysqld.yml"
ls -la /usr/local/percona/pmm2/collectors/custom-queries/mysql/high-resolution | grep -q "queries-mysqld.yml"
ls -la /usr/local/percona/pmm2/collectors/custom-queries/mysql/high-resolution | grep -q "queries-mysqld-group-replication.yml"
ls -la /usr/local/percona/pmm2/collectors/custom-queries/mysql/low-resolution | grep -q "queries-mysqld.yml"
ls -la /usr/local/percona/pmm2/collectors/custom-queries/postgresql/high-resolution | grep -q "example-queries-postgres.yml"
ls -la /usr/local/percona/pmm2/collectors/custom-queries/postgresql/high-resolution | grep -q "queries-postgres-uptime.yml"
ls -la /usr/local/percona/pmm2/collectors/custom-queries/postgresql/medium-resolution | grep -q "example-queries-postgres.yml"
## Bug PMM-9407 still open
##ls -la /usr/local/percona/pmm2/collectors/custom-queries/postgresql/medium-resolution | grep -q "queries.yaml"
ls -la /usr/local/percona/pmm2/collectors/custom-queries/postgresql/low-resolution | grep -q "example-queries-postgres.yml"
|
Percona-QA/package-testing
|
scripts/check_artifacts.sh
|
Shell
|
gpl-2.0
| 1,961 |
#!/bin/bash
set -o nounset -o pipefail -o errexit
PF_VERSION=${PF_VERSION:-localtest}
PF_RELEASE="`echo $PF_RELEASE | sed -r 's/.*\b([0-9]+\.[0-9]+)\.[0-9]+/\1/g'`"
ISO_NAME=PacketFence-ISO-${PF_VERSION}.iso
# upload
SF_RESULT_DIR=results/sf/${PF_VERSION}
PUBLIC_REPO_DIR="/home/frs/project/p/pa/packetfence/PacketFence\ ISO/${PF_VERSION}"
DEPLOY_SF_USER=${DEPLOY_SF_USER:-inverse-bot,packetfence}
DEPLOY_SF_HOST=${DEPLOY_SF_HOST:-frs.sourceforge.net}
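# PF_VERSION, DEPLOY_SF_USER and DEPLOY_SF_HOST can all be overridden from
# the environment; the defaults target the SourceForge file release system.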
upload_to_sf() {
# warning: slashes at the end of dirs are significant for rsync
local src_dir="${SF_RESULT_DIR}/"
local dst_repo="${PUBLIC_REPO_DIR}/"
local dst_dir="${DEPLOY_SF_USER}@${DEPLOY_SF_HOST}:${dst_repo}"
declare -p src_dir dst_dir
echo "rsync: $src_dir -> $dst_dir"
# quotes to handle spaces in filenames
rsync -avz "$src_dir" "$dst_dir"
}
mkdir -p ${SF_RESULT_DIR}
echo "===> Build ISO for release $PF_RELEASE"
docker run --rm -e PF_RELEASE=$PF_RELEASE -e ISO_OUT="${SF_RESULT_DIR}/${ISO_NAME}" -v `pwd`:/debian-installer debian:11 /debian-installer/create-debian-installer-docker.sh
echo "===> Upload to Sourceforge"
upload_to_sf
|
inverse-inc/packetfence
|
ci/debian-installer/build-and-upload.sh
|
Shell
|
gpl-2.0
| 1,128 |
#!/bin/bash
set -e
allfiles=$(printf '%s\n' *.png | sort)
atlasfiles=""
stems=""
for i in $allfiles
do
echo $i
if [[ $i =~ ^([a-z]+)[1-9]\.png$ ]]
then
stem=${BASH_REMATCH[1]}
output=$stem.png.tmp
if [[ $atlasfiles != *$output* ]]
then
montage $stem?.png -geometry 32x32 $output
atlasfiles="$atlasfiles $output"
stems="$stems $stem"
fi
fi
done
montage $atlasfiles -geometry 64x32+0+0 -gravity NorthWest -tile 1x atlas.png
rm -f $atlasfiles
echo $stems | tr ' ' '\n' > atlas.txt
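# atlas.txt ends up listing one sprite stem per line, in the same order as
# the rows of atlas.png (montage -tile 1x stacks the per-stem strips
# vertically), so consumers can map a row index back to a sprite name.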
exit 0
|
dm8tbr/sowatch
|
nekowatchlet/original-resources/makeatlas.sh
|
Shell
|
gpl-2.0
| 505 |
jekyll server -H 172.16.18.163
|
Kylinking/kylinking.github.io
|
_site/start.sh
|
Shell
|
gpl-2.0
| 30 |
#!/bin/bash
## Use this script to build and compile a java project
## Jar options
JARVERSION="0.2"
JARNAME="PixelmonUSInstaller"
MAINCLASS="us.pixelmon.installer.Main"
## Path options
#BASEDIR=/home/aaron/repo/github/aabmass/PixelmonUSInstaller
BASEDIR=`pwd`
LISTFILE=$BASEDIR/javasrc.list
LIBDIR=$BASEDIR/lib
SRCDIR=$BASEDIR/src
BINDIR=$BASEDIR/bin
JARDIR=$BASEDIR/jar
## Array of other files you want added to the jar (ie library classes)
ADDTOJAR=("${ADDTOJAR[@]}"
"$SRCDIR/config")
## Formatting
_uf="\e[0m" ##unformat clears formatting
_blue="\e[34m" ##blue
_lc="\e[96m" ##light cyan
_fm="${_lc}==> " ##default formatting
ORIGDIR=`pwd`
mkdir -p {$BASEDIR,$SRCDIR,$BINDIR,$JARDIR}
cd $BASEDIR
echo -e "${_fm}Finding new java files...\n"
find . -iname "*.java" | tee $LISTFILE
echo -e "\n${_fm}Beginning compilation...\n"
javac -extdirs $LIBDIR -d $BINDIR @${LISTFILE}
if [ "$?" -eq 1 ]
then
echo -e "${_fm}Compilation was unsuccessful. Exiting..."
exit
else
echo -e "${_fm}Compilation was successful!"
fi
cd $BINDIR
echo -e "${_fm}Adding files specified in 'ADDTOJAR' to the jar..."
cp -R "${ADDTOJAR[@]}" $BINDIR
echo -e "${_fm}Creating jar..."
jar cfe ${JARDIR}/${JARNAME}-${JARVERSION}.jar ${MAINCLASS} .
echo -e "${_fm}Done!"
cd $ORIGDIR
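# To launch the result (the entry point was set via 'jar cfe' above):
#   java -jar $JARDIR/${JARNAME}-${JARVERSION}.jar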
|
aabmass/PixelmonUSInstaller
|
dist.sh
|
Shell
|
gpl-2.0
| 1,298 |
#!/bin/sh
# Run this to set up the build system: configure, makefiles, etc.
package="MadJack"
srcdir=`dirname $0`
test -z "$srcdir" && srcdir=.
cd "$srcdir"
DIE=0
(autoheader --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "You must have autoconf installed to compile $package."
echo "Download the appropriate package for your distribution,"
echo "or get the source tarball at ftp://ftp.gnu.org/pub/gnu/"
DIE=1
}
(autoconf --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "You must have autoconf installed to compile $package."
echo "Download the appropriate package for your distribution,"
echo "or get the source tarball at ftp://ftp.gnu.org/pub/gnu/"
DIE=1
}
(automake --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "You must have automake installed to compile $package."
echo "Download the appropriate package for your system,"
echo "or get the source from one of the GNU ftp sites"
echo "listed in http://www.gnu.org/order/ftp.html"
DIE=1
}
(pkg-config --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "You must have pkg-config installed to compile $package."
echo "Download the appropriate package for your system,"
echo "or get the source from http://pkgconfig.freedesktop.org/"
DIE=1
}
if test "$DIE" -eq 1; then
exit 1
fi
echo "Generating configuration files for $package, please wait...."
run_cmd() {
echo " running $* ..."
if ! "$@"; then
echo failed!
exit 1
fi
}
# Because git doesn't support empty directories
if [ ! -d "$srcdir/build-scripts" ]; then
mkdir "$srcdir/build-scripts"
fi
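# The order below matters: aclocal collects the m4 macros first, autoheader
# then generates config.h.in, automake produces the Makefile.in templates,
# and autoconf finally emits the configure script itself.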
run_cmd aclocal
run_cmd autoheader
run_cmd automake --add-missing --copy
run_cmd autoconf
$srcdir/configure && echo
|
njh/madjack
|
autogen.sh
|
Shell
|
gpl-2.0
| 1,767 |
#!/bin/sh
./compile_all.sh
./test_run.sh
|
BIC-MNI/JIV2
|
do_all.sh
|
Shell
|
gpl-2.0
| 41 |
#!/usr/bin/env bash
MWT=`git worktree list --porcelain | grep -B 3 "heads/master" | grep worktree \
|cut -d " " -f 2`
if [ -n "$MWT" ]; then
# unfortunately git does not allow checking out the branch that
# has a work-tree elsewhere
pushd "$MWT"
else
BR=`git rev-parse --abbrev-ref HEAD`
if [ "$BR" != "master" ]; then
echo "Not on master branch"
exit 1
fi
fi
REV=`git log -1 | grep merge | sed -e 's/.*merge \([^ ]\+\).*/\1/'`
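# REV is extracted from the last commit message, which is expected to contain
# "merge <tag>"; that tag (and its -dev twin) is what gets deleted below.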
if [ -z "$REV" ]; then
echo "No merge commit found"
exit 1
fi
if ! git diff-files --quiet; then
echo "Work tree is not clean"
exit 1
fi
echo "Undoing $REV"
git reset --hard HEAD^
if [ -n "$MWT" ]; then
popd
fi
git tag -d $REV
git tag -d $REV-dev
if ! git checkout devel; then
echo "Cannot checkout devel"
exit 1
fi
git reset --hard HEAD^
|
bolle732/dosemu2
|
unmake-tag.sh
|
Shell
|
gpl-2.0
| 819 |
#! /bin/sh
# Compile and install GCC 4.8.5.
#
# Compiling on CentOS requires the 'glibc-devel.i686' package
# to fix dependency on 'gnu/stubs-32.h'.
#
# Copyright (C) 2017 Qijia (Michael) Jin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Add /usr/local/lib to /etc/ld.so.conf.d/gcc-x86_64.conf
if test ! -s /etc/ld.so.conf.d/gcc-x86_64.conf; then
echo "/usr/local/lib" | sudo tee /etc/ld.so.conf.d/gcc-x86_64.conf >/dev/null
fi
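# Assumption: running ldconfig afterwards refreshes the dynamic linker cache
# so the new /usr/local/lib entry takes effect immediately:
#sudo ldconfig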
# Get GNU project public keys.
sh ./get_gnu_public_keys.sh || exit $?
# Build GMP 6.1.2 and install.
sh ./gmp_6.1.2_compile.sh || exit $?
# Build MPFR 3.1.5 and install.
sh ./mpfr_3.1.5_compile.sh || exit $?
# Build MPC 1.0.3 and install.
sh ./mpc_1.0.3_compile.sh || exit $?
# Build ISL 0.14 and install.
sh ./isl_0.14_compile.sh || exit $?
# Build Libtool 2.4.6 and install.
sh ./libtool_2.4.6_compile.sh || exit $?
# Build Guile 2.0.13 and install.
sh ./guile_2.0.13_compile.sh || exit $?
# Check if guile is installed.
if ! command -v guile >/dev/null 2>&1; then
echo "error: guile could not be found!"
exit 1
fi
# Build AutoGen 5.18.12 and install.
sh ./autogen_5.18.12_compile.sh || exit $?
# Check if autogen is installed.
if ! command -v autogen >/dev/null 2>&1; then
echo "error: autogen could not be found!"
exit 1
fi
# Build GCC 4.8.5 and install.
sh ./gcc_4.8.5_compile.sh || exit $?
|
mikhail-j/gcc_compiler_scripts
|
gcc_4.8.5_compile_install.sh
|
Shell
|
gpl-2.0
| 2,360 |
#!/bin/bash
# ****************************************************************
# prepare_clinuxwin.sh ==
# Main script preparing the scripts for the Linux clients
# To be run from /home/netlogon/clients-linux on clinux-serv
# 20180604
# **************************************************************
DATE1=$(date +%F+%0kh%0M)
# Colors
ROUGE="\\033[1;31m"
VERT="\\033[1;32m"
BLEU="\\033[1;34m"
JAUNE="\\033[1;33m"
COLTITRE="\033[1;35m" # Pink
COLDEFAUT="\033[0;33m" # Brownish yellow
COLCMD="\033[1;37m" # White
COLERREUR="\033[1;31m" # Red
COLTXT="\033[0;37m" # Grey
COLINFO="\033[0;36m" # Cyan
COLPARTIE="\033[1;34m" # Blue
ftp=https://raw.githubusercontent.com/jcmousse/clinux/master
ftp2=http://jcmousse.free.fr
# Install net-tools if ifconfig is not present
if [ ! -e /sbin/ifconfig ] ; then
apt-get update && apt-get install -y net-tools
fi
# Fetch the server's IP address
getip=$(ifconfig $(ifconfig | grep enp | cut -d: -f1) | grep "inet\ 1" |awk '{print $2}')
#####
# Fonction test de la connexion
LINE_TEST()
{
if ( ! wget -q --output-document=/dev/null 'http://wawadeb.crdp.ac-caen.fr/index.html') ; then
ERREUR "Votre connexion internet ne semble pas fonctionnelle !"
exit 1
fi
}
#####
###################################################################
# Check for, and download, the latest version of the script       #
###################################################################
SCRIPT_FILE="prepare_clinuxwin_20180604.sh"
SCRIPT_FILE_MD5="prepare_clinuxwin_20180604.md5"
SCRIPTS_DIR="/home/netlogon/clients-linux"
NODL="no"
DEBUG="no"
# Check internet access
LINE_TEST
# Initial preparation, when first installing the machine
if [ ! -e $SCRIPTS_DIR ]; then
echo -e "$COLINFO"
echo "Préparation du répertoire principal..."
echo -e "$COLTXT"
mkdir -p $SCRIPTS_DIR
cd $SCRIPTS_DIR
echo -e "$COLINFO"
echo "Téléchargement initial des scripts..."
echo -e "$COLTXT"
wget --no-check-certificate --no-cache -N --tries=1 --connect-timeout=1 $ftp/clinuxwin/$SCRIPT_FILE || ERREUR="1"
wget --no-check-certificate --no-cache -N --tries=1 --connect-timeout=1 $ftp/clinuxwin/$SCRIPT_FILE_MD5 || ERREUR="1"
if [ "$ERREUR" = "1" ];then
echo -e "$COLERREUR"
echo "Problème pour récupérer le script principal : ABANDON !"
echo -e "$COLTXT"
exit 1
fi
chmod +x $SCRIPT_FILE
echo -e "$JAUNE"
echo "Fin de la préparation."
echo "============================================================="
echo "Relancez Relancez /home/netlogon/clients-linux/prepare_clinuxwin.sh"
echo -e "$COLTXT"
exit 1
fi
# Add the path for root
#TEST_PROFILE=$(cat /root/.profile | grep clients-linux)
#if [ ! -n "$TEST_PROFILE" ]; then
# sed -i '2a\PATH="/home/netlogon/clients-linux" ' /root/.profile
#fi
# Alias for shutdown
TEST_HALT=$(cat /root/.bashrc | grep poweroff)
if [ ! -n "$TEST_HALT" ]; then
echo "alias halt='poweroff'" >> /root/.bashrc
fi
# Check for, and download, a newer version if available
if [ "$NODL" != "yes" ]; then
echo -e "$COLINFO"
echo "Vérification en ligne que vous avez bien la dernière version du script..."
echo -e "$COLTXT"
cd /root
# Download the up-to-date script and its md5 file into /root
rm -f $SCRIPT_FILE_MD5 $SCRIPT_FILE
wget --no-check-certificate --no-cache -N --tries=1 --connect-timeout=1 --quiet $ftp/clinuxwin/$SCRIPT_FILE || ERREUR="1"
wget --no-check-certificate --no-cache -N --tries=1 --connect-timeout=1 --quiet $ftp/clinuxwin/$SCRIPT_FILE_MD5 || ERREUR="1"
if [ "$ERREUR" = "1" ];then
echo -e "$COLERREUR"
echo "Problème pour récupérer la version en ligne : ABANDON !"
echo -e "$COLTXT"
exit 1
fi
# Compute the md5 sums of the files we just downloaded
MD5_CTRL_FILE=$(cat $SCRIPT_FILE_MD5) # contents of the downloaded md5 file
MD5_CTRL_DL=$(md5sum $SCRIPT_FILE) # md5 of the downloaded script
# Verify what we just downloaded
if [ "$MD5_CTRL_FILE" != "$MD5_CTRL_DL" ]
then
# The download is corrupted
echo -e "$COLERREUR"
echo "Controle MD5 du script téléchargé incorrect. Relancez le script pour qu'il soit de nouveau téléchargé."
echo -e "$COLTXT"
exit 1
fi
# Move to /home/netlogon/clients-linux
cd $SCRIPTS_DIR
# Compute the md5 of /home/netlogon/clients-linux/prepare_clinuxwin.sh
MD5_CTRL_LOCAL=$(md5sum $SCRIPT_FILE)
# Go back to /root and make the downloaded script executable
cd
chmod +x *.sh
# Compare the md5 of the script on the clinux server with that of the downloaded one
if [ "$MD5_CTRL_FILE" != "$MD5_CTRL_LOCAL" ]; then
# If the md5 sums differ, set the re-run flag to yes
RELANCE="YES"
# Then copy the script from /root to the scripts directory
cp $SCRIPT_FILE $SCRIPTS_DIR/
fi
if [ "$RELANCE" == "YES" ]
then
echo -e "$JAUNE"
echo "Le script a été mis à jour depuis le dépôt."
echo "Relancez /home/netlogon/clients-linux/prepare_clinuxwin.sh"
echo ""
echo -e "$COLTXT"
sleep 1
exit 1
fi
echo -e "$VERT"
echo ""
echo "Vous disposez de la dernière version du script, on peut continuer..."
sleep 3
echo -e "$COLTXT"
else
echo "Mode debug, pas de téléchargement."
sleep 2
fi
##############################################################################
# Function: update the machine and install the required packages
MAJSERV()
{
apt-get update && apt-get -y upgrade && apt-get -y dist-upgrade
apt-get install -y samba cifs-utils apt-cacher-ng net-tools
}
##########################
# Function: display the proxy parameters file
AFFICHE_PROXYS()
{
cat /etc/clinux/proxys.data
}
##########################
# Function: display the server parameters file
AFFICHE_PARAMS()
{
cat /etc/clinux/params.txt
}
##########################
# Function: define the domain variables
DEF_VAR()
{
# Parameters file
#params_file=/home/jc/clinux/clinuxwin/sedtest.txt
params_file=/etc/clinux/params.txt
# Read back any existing parameters
EX_IPPART=$(cat /etc/clinux/params.txt | grep partages | cut -d':' -f2)
EX_IPDNS=$(cat /etc/clinux/params.txt | grep dns | cut -d':' -f2)
EX_DOM1=$(cat /etc/clinux/params.txt | grep domaine | cut -d':' -f2)
EX_NOMDNS=$(cat /etc/clinux/params.txt | grep rodc | cut -d':' -f2)
EX_NOMAD=$(cat /etc/clinux/params.txt | grep pdc | cut -d':' -f2)
echo -e "$JAUNE"
echo "####################################"
echo "# Saisie des paramètres du serveur #"
echo "####################################"
echo -e "$COLTXT"
echo -e "$COLTITRE" "Adresse IP du serveur : " ; tput sgr0
echo -e "$COLDEFAUT" "Adresse existante dans le fichier de paramètres : [$EX_IPPART] " ; tput sgr0
echo -e "$COLINFO" "Validez par entrée si cette adresse convient, ou entrez une autre adresse : " ; tput sgr0
echo -e "$COLTXT"
read IPPART
echo ""
if [ -z "$IPPART" ]; then
IPPART=$EX_IPPART
fi
echo -e "$COLTXT" "L'adresse IP du serveur des partages est $IPPART"
echo ""
echo "Adresse serveur partages :$IPPART" > $params_file
#echo -e "$COLTITRE" "Entrez l'adresse IP du serveur DNS: "
#echo -e "$COLDEFAUT" "Adresse existante dans le fichier de paramètres : [$EX_IPDNS] "
#echo -e "$COLINFO" "Validez par entrée si cette adresse convient, ou entrez une autre adresse:"
#echo -e "$COLTXT"
#read IPDNS
# if [ -z "$IPDNS" ]; then
# IPDNS=$EX_IPDNS
# fi
#echo -e "$COLTXT" "L'adresse IP du serveur DNS est $IPDNS "
#echo ""
IPDNS=$IPPART
echo "Adresse serveur dns :$IPDNS" >> $params_file
echo -e "$COLTITRE" "Entrez le nom de domaine (en minuscules): " ; tput sgr0
echo -e "$COLINFO" "Exemple : acad-clermont.local " ; tput sgr0
echo -e "$COLDEFAUT" "Domaine existant dans le fichier de paramètres : [$EX_DOM1] "
echo -e "$COLINFO" "Validez par entrée si ce nom convient, ou entrez une autre adresse:"
echo -e "$COLTXT"
echo ""
read DOM1
echo ""
if [ -z "$DOM1" ]; then
DOM1=$EX_DOM1
fi
echo -e "$COLTXT" "Le domaine est $DOM1 "
echo ""
echo "Nom de domaine :$DOM1" >> $params_file
echo -e "$COLTITRE" "Entrez le nom DNS du serveur : " ; tput sgr0
echo -e "$COLDEFAUT" "Nom DNS existant dans le fichier de paramètres : [$EX_NOMDNS] "
echo -e "$COLINFO" "Validez par entrée si ce nom convient, ou entrez une autre adresse:"
echo -e "$COLTXT"
read NOMDNS
if [ -z "$NOMDNS" ]; then
NOMDNS=$EX_NOMDNS
fi
echo ""
echo -e "$COLTXT" "Le nom dns du serveur est $NOMDNS "
echo -e "$COLTXT" "Le nom fqdn du serveur est $NOMDNS.$DOM1 "
echo "Serveur rodc :$NOMDNS" >> $params_file
echo "Nom fqdn du serveur :$NOMDNS.$DOM1" >> $params_file
echo ""
#echo -e "$COLTITRE" "Entrez le nom du serveur PDC : " ; tput sgr0
#echo -e "$COLDEFAUT" "Nom existant dans le fichier de paramètres : [$EX_NOMAD] "
#echo -e "$COLINFO" "Validez par entrée si ce nom convient, ou entrez une autre adresse:"
#echo -e "$COLTXT"
#read NOMPDC
# if [ -z "$NOMPDC" ]; then
# NOMPDC=$EX_NOMAD
# fi
NOMPDC=$NOMDNS
echo -e "$COLTXT" "Le nom du serveur PDC est $NOMPDC "
echo "Nom pdc :$NOMPDC" >> $params_file
echo -e "$JAUNE"
echo "##### Paramètres serveur #####"
echo ""
echo "Voici les paramètres actuels pour le serveur :"
echo ""
echo -e "$COLTXT"
AFFICHE_PARAMS
echo ""
echo -e "$COLINFO"
echo "Validez par entrée si tout est correct, ou quittez le script (ctrl-c) puis relancez le."
echo -e "$COLTXT"
read lapin
echo ""
}
##########################
# Function: create the required directories
DEF_REP_1()
{
echo -e "$COLINFO"
echo "Création des répertoires indispensables..."
echo -e "$COLTXT"
rep1=/etc/clinux
rep2=/home/netlogon/clients-linux/alancer
rep3=/home/netlogon/clients-linux/once
rep4=/home/netlogon/clients-linux/conex
rep5=/home/netlogon/clients-linux/persolinks/base
rep6=/home/netlogon/clients-linux/persolinks/cdi
rep7=/home/netlogon/clients-linux/persolinks/sprofs
rep8=/home/netlogon/clients-linux/persolinks/eleves
rep9=/home/netlogon/clients-linux/persolinks/profs
rep10=/home/netlogon/clients-linux/persolinks/autres
rep11=/home/netlogon/clients-linux/divers
for rep in "$rep1" "$rep2" "$rep3" "$rep4" "$rep5" "$rep6" "$rep7" "$rep8" "$rep9" "$rep10" "$rep11"; do
if [ ! -e "$rep" ]; then
mkdir -p "$rep"
fi
done
}
##########################
# Function: fill in the proxy parameters file
CONF_PROXYS()
{
echo -e "$JAUNE"
echo "####################################"
echo "# Saisie des paramètres des proxys #"
echo "####################################"
echo -e "$COLTXT"
touch /etc/clinux/proxys.data
dirprox=/etc/clinux
fichproxy=proxys.data
testapt=$(cat $dirprox/$fichproxy | grep ip_cache)
testamon=$(cat $dirprox/$fichproxy | grep ip_amon)
if [ ! -n "$testapt" ] ; then
echo -e "$COLTITRE"
echo "Voulez-vous installer un cache apt sur cette machine ?"
echo -e "$COLTXT"
PS3='Repondre par o ou n: '
LISTE=("[o] oui" "[n] non")
select CHOIX in "${LISTE[@]}" ; do
case $REPLY in
1|o)
echo -e "$COLINFO"
echo "Le cache apt sera installé (cool)."
echo -e "$COLTXT"
CACHE=o
break
;;
2|n)
echo -e "$COLINFO"
echo "Pas de cache apt (beurk)."
echo -e "$COLTXT"
echo ""
CACHE=n
break
;;
esac
done
else CACHE=o
fi
# Define the address of the school's apt cache
if [ $CACHE = "o" ] ; then
if [ ! -n "$testapt" ] ; then
echo -e "$COLTITRE"
echo "Adresse du CACHE APT de l'etablissement :"
echo -e "$COLINFO"
echo "$getip"
echo -e "$COLTXT"
echo ""
APTCACHE=$getip
echo "ip_cache=$APTCACHE" >> $dirprox/$fichproxy
echo -e "$COLINFO" "ip du cache stockée dans le fichier" ; tput sgr0
echo -e "$COLTXT"
echo "L'adresse du CACHE APT est $APTCACHE"
echo -e "$COLTXT"
echo ""
else
APTCACHE=$(cat $dirprox/$fichproxy | grep ip_cache | cut -d= -f2 | awk '{ print $1}')
echo -e "$COLTXT" "valeur lue pour le cache : $APTCACHE " ; tput sgr0
fi
else
if [ $CACHE = "o" ] ; then
# Display the apt cache IP address
echo -e "$COLTXT"
echo "Voici l'adresse du cache apt enregistrée sur ce serveur :"
echo -e "$testapt"
fi
fi
# Define the address of the school's Amon proxy
if [ ! -n "$testamon" ] ; then
echo -e "$COLINFO" "Adresse du proxy AMON de l'etablissement :"
echo -e "$COLTITRE"
echo "Entrez l'adresse du proxy Amon de l'etablissement sous la forme 172.17.31.254"
echo -e "$COLTXT"
read IPAMON
echo ""
echo "ip_amon=$IPAMON" >> $dirprox/$fichproxy
echo -e "$COLINFO" "ip de l'Amon stockée dans le fichier" ; tput sgr0
echo -e "$COLTXT"
echo "L'adresse du proxy Amon est $IPAMON"
echo -e "$COLTXT"
echo ""
else
IPAMON=$(cat $dirprox/$fichproxy | grep ip_amon | cut -d= -f2 | awk '{ print $1}')
echo -e "$COLTXT" "valeur lue pour l'amon : $IPAMON " ; tput sgr0
fi
}
####
# Function: configure the (minimal) samba server
CONF_SMB()
{
cp /etc/samba/smb.conf /etc/samba/smb.conf_$DATE1
dom1=$(cat /etc/clinux/params.txt | grep domaine | cut -d':' -f2)
name=$(hostname)
echo "#smb.conf minimal
# Share for the clients
[global]
workgroup = $dom1
netbios name = $name
dns proxy = no
domain logons = no
domain master = no
[netlogon]
comment = Scripts clients linux
path = /home/netlogon/clients-linux
browseable = yes
read only = yes
guest ok = yes" > /etc/samba/smb.conf
}
###
# Function: set execute permissions on the scripts
PERM()
{
echo -e "$COLINFO" "Réglage des droits sur les scripts... fait."
echo ""
echo -e "$COLTXT"
chmod +x /home/netlogon/clients-linux/*.sh
chmod +x $rep2/*.sh
chmod +x $rep3/*.sh
chmod +x $rep4/*.sh
}
###############################################################
###############################################################
# Create the directories
DEF_REP_1
# Configure the proxies
CONF_PROXYS
echo ""
AFFICHE_PROXYS
echo ""
echo -e "$COLTITRE"
echo "Les paramètres PROXYs sont-ils corrects? "
echo -e "$COLTXT"
read -p " o (oui)
n (non) : " rep
case $rep in
o )
# All good, carry on
echo "On poursuit..."
;;
n )
# Re-run the proxy configuration
> /etc/clinux/proxys.data
CONF_PROXYS
;;
esac
echo -e "$COLINFO"
echo "##############################################################"
echo "##############################################################"
echo -e "$COLTXT"
# Check that the parameters file exists
if [ -f "/etc/clinux/params.txt" ];then
echo -e "$COLINFO"
echo "Le fichier de paramètres serveur existe."
echo -e "$COLTXT"
else
DEF_VAR
fi
# Display the current parameters
echo -e "$JAUNE"
echo "##############################"
echo "### Paramètres SERVEUR ###"
echo "##############################"
echo ""
echo "Voici les paramètres actuels pour le serveur : "
echo ""
echo -e "$COLTXT"
AFFICHE_PARAMS
echo ""
echo -e "$COLTITRE"
echo "Les paramètres SERVEUR sont-ils corrects? "
echo -e "$COLTXT"
read -p " o (oui)
n (non) : " rep
case $rep in
o )
# All good, carry on
echo "On poursuit..."
;;
n )
# Re-run the parameter entry
echo ""
DEF_VAR
;;
esac
echo ""
# Update the machine and run the installations
echo ""
echo -e "$COLINFO"
echo "Mise à jour de la machine..."
echo -e "$COLTXT"
MAJSERV
# Configure samba
echo ""
echo -e "$COLINFO"
echo "Configuration de samba pour le partage des clients..."
echo -e "$COLTXT"
CONF_SMB
###################################################
## Download and configure the scripts ##
script_connect=createlinks_20180604.sh
script_deconnect=deletelinks_20180604.sh
# Connection/disconnection scripts (directory /conex)
cd $rep4
rm -f init_unefois*
wget --no-check-certificate --quiet $ftp/clinuxwin/conex/init_unefois.sh
echo -e "$COLINFO"
echo "/conex/init_unefois.sh créé ou mis à jour."
echo -e "$COLTXT"
rm -f createlinks*
wget --no-check-certificate --quiet $ftp/clinuxwin/conex/$script_connect
echo -e "$COLINFO"
echo "/conex/createlinks.sh créé ou mis à jour."
echo -e "$COLTXT"
rm -f deletelinks*
wget --no-check-certificate --quiet $ftp/clinuxwin/conex/$script_deconnect
echo -e "$COLINFO"
echo "/conex/deletelinks.sh créé ou mis à jour."
echo -e "$COLTXT"
rm -f synchrohome*
wget --no-check-certificate --quiet $ftp/clinuxwin/conex/synchrohome.sh
echo -e "$COLINFO"
echo "/conex/synchrohome.sh créé ou mis à jour."
echo -e "$COLTXT"
rm -f profil_ff*
wget --no-check-certificate --quiet $ftp/clinuxwin/conex/profil_ff.sh
echo -e "$COLINFO"
echo "/conex/profil_ff.sh créé ou mis à jour."
echo -e "$COLTXT"
rm -f alsavolume*
wget --no-check-certificate --quiet $ftp/clinuxwin/conex/alsavolume.sh
echo -e "$COLINFO"
echo "/conex/alsavolume.sh créé ou mis à jour."
echo -e "$COLTXT"
rm -f mimeapps.list*
wget --no-check-certificate --quiet $ftp/clinuxwin/conex/mimeapps.list
echo -e "$COLINFO"
echo "/conex/mimeapps.list créé ou mis à jour."
chmod +x *.sh
chmod 755 *.sh
# Scripts to run once (directory /once)
cd $rep3
rm -f test_unefois.sh modif_lxde_icons.sh lxde_icons_unefois.sh cfguu_unefois.sh xscreen_unefois.sh mount-a_unefois.sh
rm -f test_unefois* lxde_icons_unefois* config_wol_unefois* cfguu_unefois*
wget --no-check-certificate --quiet $ftp/clinuxwin/once/test_unefois.sh
wget --no-check-certificate --quiet $ftp/clinuxwin/once/lxde_icons_unefois.sh
#wget --no-check-certificate --quiet $ftp/clinuxwin/once/cfguu_unefois.sh
wget --no-check-certificate --quiet $ftp/clinuxwin/once/xscreen_unefois.sh
#wget --no-check-certificate --quiet $ftp/clinuxwin/once/mount-a_unefois.sh
chmod +x *.sh
chmod 755 *.sh
# Scripts that can be run on the clients (directory /alancer)
# Fetch the server's IP address
getse3ip=$(ifconfig $(ifconfig | grep enp | cut -d: -f1) | grep "inet\ 1" |awk '{print $2}')
cd $rep2
#wget --no-check-certificate --quiet $ftp/clinuxwin/alancer/maj_java_local.sh
wget --no-check-certificate --quiet $ftp/clinuxwin/alancer/maj_client_stretch.sh
if [ -n "$testapt" -o $CACHE = "o" ] ; then
# sed -i 's/##ipcache##/'"$APTCACHE"'/g' $rep2/maj_java_local.sh
# sed -i 's/##ipamon##/'"$IPAMON"'/g' $rep2/maj_java_local.sh
sed -i 's/##ipcache##/'"$APTCACHE"'/g' $rep2/maj_client_stretch.sh
sed -i 's/##ipamon##/'"$IPAMON"'/g' $rep2/maj_client_stretch.sh
echo -e "$COLINFO"
echo "maj_client_stretch.sh téléchargé ou mis à jour. "
# echo "maj_java_local.sh et maj_client_stretch.sh téléchargés ou mis à jour. "
echo -e "$COLTXT"
rm -f change_proxy_client.sh
wget --no-check-certificate --quiet $ftp/clinuxwin/alancer/change_proxy_client.sh
sed -i 's/##ipcache##/'"$APTCACHE"'/g' $rep2/change_proxy_client.sh
sed -i 's/##ipamon##/'"$IPAMON"'/g' $rep2/change_proxy_client.sh
echo -e "$COLINFO"
echo "change_proxy_client.sh téléchargé ou mis à jour. "
echo -e "$COLTXT"
else
# sed -i 's/##ipcache##/'"$IPAMON"'/g' $rep2/maj_java*
# sed -i 's/##ipamon##/'"$IPAMON"'/g' $rep2/maj_java*
# sed -i 's/3142/3128/g' $rep2/maj_java*
sed -i 's/3142/3128/g' $rep2/maj_client_*
sed -i 's/##ipcache##/'"$IPAMON"'/g' $rep2/maj_client_*
sed -i 's/##ipamon##/'"$IPAMON"'/g' $rep2/maj_client_*
echo -e "$COLINFO"
# echo "maj_java_local.sh téléchargé ou mis à jour. "
echo "maj_client_stretch.sh téléchargé ou mis à jour. "
echo -e "$COLTXT"
fi
# SSH key fix, renaming and installation scripts
cd $rep2
rm -f $rep2/correctif_cles_ssh*
wget --no-check-certificate --quiet $ftp/clinuxwin/alancer/correctif_cles_ssh.sh
sed -i 's/##ipamon##/'"$IPAMON"'/g' $rep2/correctif_cles_ssh.sh
sed -i 's/##se3ip##/'"$getse3ip"'/g' $rep2/correctif_cles_ssh.sh
echo -e "$COLINFO"
echo "correctif_cles_ssh.sh téléchargé ou mis à jour. "
echo -e "$COLTXT"
rm -f $rep2/renomme_client_linux*
wget --no-check-certificate --quiet $ftp/clinuxwin/alancer/renomme_client_linux_v2.sh
#sed -i 's/##se3rne##/'"$getrne"'/g' $rep2/renomme_client_linux_v2.sh
echo -e "$COLINFO"
echo "renomme_client_linux_v2.sh téléchargé ou mis à jour. "
echo -e "$COLTXT"
rm -f $rep2/corrige_uuid_swap*
wget --no-check-certificate --quiet $ftp/clinuxwin/alancer/corrige_uuid_swap.sh
echo -e "$COLINFO"
echo "corrige_uuid_swap.sh téléchargé ou mis à jour. "
echo -e "$COLTXT"
rm -f $rep2/vnc_user*
wget --no-check-certificate --quiet $ftp/clinuxwin/alancer/vnc_user.sh
echo -e "$COLINFO"
echo "vnc_user.sh téléchargé ou mis à jour. "
echo -e "$COLTXT"
rm -f $rep2/xscreensaver_*
#wget --quiet $ftp2/se3/xscreensaver_5.36-1_amd64.deb
#wget --quiet $ftp2/se3/xscreensaver_5.36-1_i386.deb
#echo -e "$COLINFO" "Paquets xscreensaver téléchargés." ; tput sgr0
cd
echo ""
echo -e "$COLINFO" "Variables remplacées dans les scripts." ; tput sgr0
cd
# Fetch and configure the client integration script
script_int=rejoint_clinux_win_auto_20180604.sh
cd /home/netlogon/clients-linux
rm -f rejoint_clinux*
wget --no-check-certificate --quiet $ftp/clinuxwin/stretch/integration/$script_int
IPPART=$(cat /etc/clinux/params.txt | grep partages | cut -d':' -f2)
IPDNS=$(cat /etc/clinux/params.txt | grep dns | cut -d':' -f2)
DOM1=$(cat /etc/clinux/params.txt | grep domaine | cut -d':' -f2)
NOMDNS=$(cat /etc/clinux/params.txt | grep rodc | cut -d':' -f2)
NOMAD=$(cat /etc/clinux/params.txt | grep pdc | cut -d':' -f2)
echo ""
echo "partages : $IPPART"
echo "dns : $IPDNS"
echo "domaine : $DOM1"
echo "nom dns rodc : $NOMDNS"
echo "nom pdc : $NOMAD"
echo "ip serveur clinux : $getse3ip"
echo ""
echo -e "$JAUNE" "Validez par entrée si tout est correct."
echo -e "$COLTXT"
read zut
sed -i 's/##ippart##/'"$IPPART"'/g' /home/netlogon/clients-linux/$script_int
sed -i 's/##ipdns##/'"$IPDNS"'/g' /home/netlogon/clients-linux/$script_int
sed -i 's/##dom1##/'"$DOM1"'/g' /home/netlogon/clients-linux/$script_int
sed -i 's/##nomdns##/'"$NOMDNS"'/g' /home/netlogon/clients-linux/$script_int
sed -i 's/##nomad##/'"$NOMAD"'/g' /home/netlogon/clients-linux/$script_int
sed -i 's/##se3ip##/'"$getse3ip"'/g' /home/netlogon/clients-linux/$script_int
echo -e "$VERT" "Les scripts d'intégration des clients sont prêts. "
echo ""
echo -e "$COLTXT"
# Fetch and configure the client installation script
script_install=install_stretch_etabs.sh
cd /home/netlogon/clients-linux
rm -f install_stretch_etabs*
wget --no-check-certificate --quiet $ftp/clinuxwin/stretch/installation/$script_install
echo -e "$VERT" "Le script d'installation des clients a été téléchargé. "
echo ""
echo -e "$COLTXT"
# Download the usual shortcuts
# base
cd $rep5
rm -f iceweasel.desktop repare_profil.desktop ent.desktop synchro.desktop
wget --no-check-certificate --quiet $ftp/clinuxwin/raccourcis/iceweasel.desktop
wget --no-check-certificate --quiet $ftp/clinuxwin/raccourcis/repare_profil.desktop
wget --no-check-certificate --quiet $ftp/clinuxwin/raccourcis/ent.desktop
wget --no-check-certificate --quiet $ftp/clinuxwin/raccourcis/synchro.desktop
# cdi
cd $rep6
rm -f esidoc.desktop qwant.desktop
wget --no-check-certificate --quiet $ftp/clinuxwin/raccourcis/esidoc.desktop
wget --no-check-certificate --quiet $ftp/clinuxwin/raccourcis/qwant.desktop
# staff room
cd $rep7
#rm -f iceweasel.desktop
#wget --no-check-certificate --quiet $ftp/clinuxwin/raccourcis/iceweasel.desktop
# students
cd $rep8
rm -f eleves.desk*
wget --no-check-certificate --quiet $ftp/clinuxwin/raccourcis/eleves.desktop
# teachers
cd $rep9
rm -f prof.desk* horde.desk*
wget --no-check-certificate --quiet $ftp/clinuxwin/raccourcis/prof.desktop
wget --no-check-certificate --quiet $ftp/clinuxwin/raccourcis/horde.desktop
# others
#cd $rep10
#rm -f veyon-prof*
#wget --no-check-certificate --quiet $ftp/clinuxwin/raccourcis/veyon-prof.desktop
echo -e "$COLINFO"
echo "Téléchargement des raccourcis terminé."
echo -e "$COLTXT"
echo -e "$COLINFO"
echo "Ajustement des droits sur les répertoires et les fichiers..."
echo -e "$COLTXT"
# Fetch and configure the copy-to-clients script (integration)
script_copie_integ=copie_script_integration_clinuxwin.sh
cd /home/netlogon/clients-linux
rm -f copie_script_integration*
wget --no-check-certificate --quiet $ftp/clinuxwin/$script_copie_integ
# Fetch and configure the copy-to-clients script (installation)
script_copie_install=copie_script_installation_clinuxwin.sh
cd /home/netlogon/clients-linux
rm -f copie_script_installation*
wget --no-check-certificate --quiet $ftp/clinuxwin/$script_copie_install
# Configure apt-cacher-ng
testaptcacher=$(cat /etc/apt-cacher-ng/acng.conf | grep $IPAMON)
if [ -n "$testaptcacher" ] ; then
echo -e "$VERT"
echo "Apt-cacher est déjà configuré."
echo -e "$COLTXT"
else
echo -e "$JAUNE"
echo "On (re)configure apt-cacher-ng..."
echo -e "$COLTXT"
sed -i 's/#\ Port:3142/Port:3142/g' /etc/apt-cacher-ng/acng.conf
sed -i 's/^#\ Proxy:\ http:\/\/.*/Proxy:\ http:\/\/'$IPAMON':3128/g' /etc/apt-cacher-ng/acng.conf
sed -i 's/^Proxy:\ http:\/\/.*/Proxy:\ http:\/\/'$IPAMON':3128/g' /etc/apt-cacher-ng/acng.conf
service apt-cacher-ng restart
fi
# Set permissions on the directories and scripts
PERM
# Drop a marker file in /root
rm -f /root/prep_*
touch /root/prep_$DATE1
echo -e "$VERT"
echo "Terminé !"
echo -e "$COLTXT"
exit 0
|
jcmousse/clinux
|
clinuxwin/prepare_clinuxwin_20180604.sh
|
Shell
|
gpl-2.0
| 25,691 |
#!/usr/bin/env bash
# Author: cbweaver (https://github.com/cbweaver)
# Description: Print usage information
# Purpose: Print usage information for all commands
# Arguments:
# None
function all_usages {
echo "$(basename $0) [options] [commands]" >&2
echo "Options" >&2
echo " --quiet Be quiet. Less output." >&2
echo " -q" >&2
echo "" >&2
echo " --version Print version" >&2
echo " -v" >&2
echo "" >&2
echo "Commands:" >&2
for usage_file in $self_dir/lib/usage/*; do
# Print command name and definition
echo " $(sed -n 1p $usage_file)" >&2
# Print command description
echo " $(sed -n 2p $usage_file)" >&2
echo "" >&2
done
}
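# Each file under lib/usage/ is expected to hold the command synopsis on its
# first line and a one-line description on its second (see the sed -n 1p/2p
# calls above); lib/usage/<command>.md files carry the long per-command text.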
# Purpose: Print usage information for a specific command
# Arguments:
# 1. (optional) command
function usage {
echo "" >&2
if [[ $# -ne 1 ]]; then
all_usages
else
if [[ -f "$self_dir/lib/usage/$1.md" ]];then
echo -n "$(basename ${0}) " >&2
cat "$self_dir/lib/usage/$1.md" >&2
else
all_usages
fi
fi
echo "" >&2
}
|
cbweaver/cms-manager
|
lib/usage.sh
|
Shell
|
gpl-2.0
| 1,057 |
#!/bin/sh
#
# Helper script to make release packages:
# - mCtrl-$VERSION-src.zip
# - mCtrl-$VERSION-bin.zip
#
# All packages are put in current directory (and overwritten if exist already).
# We want to control what's going to stdout/err.
exec 3>&1
exec 1>/dev/null 2>&1
# Change dir to project root.
cd `dirname "$0"`/..
CWD=`pwd`
PRJ="$CWD"
# Sanity check we run this script from the right directory:
if [ ! -x $PRJ/scripts/release.sh ]; then
echo "There is some path mismatch." >&3
exit 1
fi
# We will do all the real work in $TMP directory:
if [ x$TMP = x ]; then
TMP=/tmp
fi
if [ ! -d $TMP ]; then
echo "Directory '$TMP' does not exist." >&3
exit 1
fi
##################
# Detect version #
##################
echo -n "Detecting mCtrl version... " >&3
VERSION_MAJOR=`grep MCTRL_MAJOR_VERSION CMakeLists.txt | head -1 | sed 's/[^0-9]//g'`
VERSION_MINOR=`grep MCTRL_MINOR_VERSION CMakeLists.txt | head -1 | sed 's/[^0-9]//g'`
VERSION_PATCH=`grep MCTRL_PATCH_VERSION CMakeLists.txt | head -1 | sed 's/[^0-9]//g'`
VERSION=$VERSION_MAJOR.$VERSION_MINOR.$VERSION_PATCH
if [ x$VERSION = x ]; then
echo "Failed." >&3
exit 1
fi
echo "$VERSION" >&3
#####################
# Detect a zip tool #
#####################
echo -n "Detecting zip archiver... " >&3
if which 7za; then
MKZIP="7za a -r -mx9"
elif which 7z; then
MKZIP="7z a -r -mx9"
elif which zip; then
MKZIP="zip -r"
else
echo "Not found." >&3
exit 1
fi
echo "$MKZIP" >&3
###########################
# Detect a build back-end #
###########################
echo -n "Detected build back-end... " >&3
if which ninja; then
CMAKE_GENERATOR="Ninja"
BUILD="ninja -v"
elif which make; then
CMAKE_GENERATOR="MSYS Makefiles"
BUILD="make VERBOSE=1"
else
echo "Not found." >&3
exit 1
fi
echo "$BUILD" >&3
#########################
# Build 64-bit binaries #
#########################
rm -rf $TMP/mCtrl-$VERSION
git clone . $TMP/mCtrl-$VERSION
echo -n "Building 64-bit binaries... " >&3
mkdir -p "$TMP/mCtrl-$VERSION/build64"
(cd "$TMP/mCtrl-$VERSION/build64" && \
cmake -D CMAKE_BUILD_TYPE=Release \
-D CMAKE_C_FLAGS="-m64" \
-D CMAKE_EXE_LINKER_FLAGS="-m64" \
-D CMAKE_SHARED_LINKER_FLAGS="-m64" \
-D CMAKE_RC_FLAGS="--target=pe-x86-64" \
-D DLLTOOL_FLAGS="-m;i386:x86-64;-f;--64" \
-G "$CMAKE_GENERATOR" .. && \
$BUILD > $PRJ/build-x86_64.log 2>&1)
if [ $? -eq 0 ]; then
HAVE_X86_64=yes
echo "Done." >&3
else
echo "Failed. See build-x86_64.log." >&3
fi
#########################
# Build 32-bit binaries #
#########################
echo -n "Building 32-bit binaries... " >&3
mkdir -p "$TMP/mCtrl-$VERSION/build32"
(cd "$TMP/mCtrl-$VERSION/build32" && \
cmake -D CMAKE_BUILD_TYPE=Release \
-D CMAKE_C_FLAGS="-m32 -march=i586 -mtune=core2" \
-D CMAKE_EXE_LINKER_FLAGS="-m32 -march=i586 -mtune=core2" \
-D CMAKE_SHARED_LINKER_FLAGS="-m32 -march=i586 -mtune=core2" \
-D CMAKE_RC_FLAGS="--target=pe-i386" \
-D DLLTOOL_FLAGS="-m;i386;-f;--32" \
-G "$CMAKE_GENERATOR" .. && \
$BUILD > $PRJ/build-x86.log 2>&1)
if [ $? -eq 0 ]; then
HAVE_X86=yes
echo "Done." >&3
else
echo "Failed. See build-x86.log." >&3
fi
##########################
# Generate documentation #
##########################
echo -n "Generate documentation... " >&3
if `which doxygen`; then
(cd $TMP/mCtrl-$VERSION && ( cat Doxyfile ; echo "PROJECT_NUMBER=$VERSION" ) | doxygen - > $PRJ/build-doc.log 2>&1)
if [ $? -ne 0 ]; then
echo "Failed: See build-doc.log."
exit 1
fi
HAVE_DOC=yes
echo "Done." >&3
else
echo "Skipped: doxygen not found in PATH." >&3
fi
###############################
# Make mCtrl-$VERSION-bin.zip #
###############################
echo -n "Packing binary package... " >&3
rm -rf $TMP/mCtrl-$VERSION-src
mv $TMP/mCtrl-$VERSION $TMP/mCtrl-$VERSION-src
mkdir $TMP/mCtrl-$VERSION
if [ x$HAVE_X86 != x ]; then
mkdir -p $TMP/mCtrl-$VERSION/bin
cp $TMP/mCtrl-$VERSION-src/build32/mCtrl.dll $TMP/mCtrl-$VERSION/bin/
cp $TMP/mCtrl-$VERSION-src/build32/ex_*.exe $TMP/mCtrl-$VERSION/bin/
mkdir -p $TMP/mCtrl-$VERSION/lib
cp $TMP/mCtrl-$VERSION-src/build32/libmCtrl.dll.a $TMP/mCtrl-$VERSION/lib/libmCtrl.dll.a
cp $TMP/mCtrl-$VERSION-src/build32/libmCtrl.dll.a $TMP/mCtrl-$VERSION/lib/mCtrl.lib
fi
if [ x$HAVE_X86_64 != x ]; then
mkdir -p $TMP/mCtrl-$VERSION/bin64
cp $TMP/mCtrl-$VERSION-src/build64/mCtrl.dll $TMP/mCtrl-$VERSION/bin64/
cp $TMP/mCtrl-$VERSION-src/build64/ex_*.exe $TMP/mCtrl-$VERSION/bin64/
mkdir -p $TMP/mCtrl-$VERSION/lib64
cp $TMP/mCtrl-$VERSION-src/build64/libmCtrl.dll.a $TMP/mCtrl-$VERSION/lib64/libmCtrl.dll.a
cp $TMP/mCtrl-$VERSION-src/build64/libmCtrl.dll.a $TMP/mCtrl-$VERSION/lib64/mCtrl.lib
fi
if [ x$HAVE_DOC != x ]; then
cp -r $TMP/mCtrl-$VERSION-src/doc $TMP/mCtrl-$VERSION/
fi
cp -r $TMP/mCtrl-$VERSION-src/include $TMP/mCtrl-$VERSION/
cp $TMP/mCtrl-$VERSION-src/AUTHORS $TMP/mCtrl-$VERSION/
cp $TMP/mCtrl-$VERSION-src/COPYING $TMP/mCtrl-$VERSION/
cp $TMP/mCtrl-$VERSION-src/COPYING.lib $TMP/mCtrl-$VERSION/
cp $TMP/mCtrl-$VERSION-src/README $TMP/mCtrl-$VERSION/
find $TMP/mCtrl-$VERSION -name .git -exec rm -rf {} \;
find $TMP/mCtrl-$VERSION -name .gitignore -exec rm {} \;
$MKZIP mCtrl-$VERSION-bin.zip $TMP/mCtrl-$VERSION
if [ $? -ne 0 ]; then
echo "Failed." >&3
exit 1
fi
rm -rf $TMP/mCtrl-$VERSION
echo "Done." >&3
###############################
# Make mCtrl-$VERSION-src.zip #
###############################
echo -n "Packing source package... " >&3
git archive --prefix=mCtrl-$VERSION/ --output=mCtrl-$VERSION-src.zip HEAD
if [ $? -ne 0 ]; then
echo "Failed." >&3
exit 1
fi
echo "Done." >&3
|
Psiphon-Inc/mctrl
|
scripts/release.sh
|
Shell
|
gpl-2.0
| 5,767 |
#!/bin/bash
#
# Docker required
set -eu
case $(git status --porcelain --untracked-files=no) in
"") ;;
*) echo "ERROR: Current working copy has been modified - unmodified copy required so we know we can check it out separately and obtain the same contents"; exit 2;;
esac
current=$(git rev-parse --short HEAD)
echo
echo "Building appimage from revision $current..."
dockerdir=deploy/linux/docker
cat "$dockerdir"/Dockerfile_appimage.in | \
perl -p -e "s/\[\[REVISION\]\]/$current/g" > \
"$dockerdir"/Dockerfile_appimage.gen
cat "$dockerdir"/Dockerfile_test_appimage.in | \
perl -p -e "s/\[\[REVISION\]\]/$current/g" > \
"$dockerdir"/Dockerfile_test_appimage.gen
fgrep 'hg.sr.ht' ~/.ssh/known_hosts > "$dockerdir"/known_hosts
cp ~/.ssh/id_rsa_build "$dockerdir"/id_rsa_build
chmod 600 "$dockerdir"/known_hosts "$dockerdir"/id_rsa_build
trap "rm $dockerdir/known_hosts $dockerdir/id_rsa_build" 0
dockertag="cannam/sonic-visualiser-appimage-$current"
sudo docker build -t "$dockertag" -f "$dockerdir"/Dockerfile_appimage.gen "$dockerdir"
outdir="$dockerdir/output"
mkdir -p "$outdir"
container=$(sudo docker create "$dockertag")
sudo docker cp "$container":/tmp/output-appimage.tar "$outdir"
sudo docker rm "$container"
( cd "$outdir" ; tar xf output-appimage.tar && rm -f output-appimage.tar )
sudo docker build -f "$dockerdir"/Dockerfile_test_appimage.gen "$dockerdir"
|
sonic-visualiser/sonic-visualiser
|
deploy/linux/build-and-test-appimage.sh
|
Shell
|
gpl-2.0
| 1,416 |
./src/openocd -s tcl/ -f /opt/orx/orxsys/staging_dir/host/bin/adapt2.cfg -f /opt/orx/orxsys/tools/openOCD/or1k_xilinx.cfg
|
yangshanjin/openOCD
|
start.sh
|
Shell
|
gpl-2.0
| 123 |
#!/bin/bash
# OpenMeetings 2.0 Automatic Installer and Updater
# Version: 3
# Date: 02/08/2012
# Includes a Menu to choose between upgrade 2.1 or fresh install of 2.0
# Stephen Cottham
#
# Report any problems to the usual mail-list
#
mkdir /tmp/.ivy2
ln -s /tmp/.ivy2
USERNAME=coopadmin
PASSWORD=Zrr1g1M5wUp3CeImh63k
while :
do
clear
echo "**************************"
echo "* OpenMeetings Installer *"
echo "**************************"
echo "* [1] Ubuntu Install"
echo "* [2] Debian Install"
echo "* [3] Upgrade"
echo "* [0] Exit"
echo "**************************"
echo -n "Enter your menu choice [1, 2, 3 or 4]: "
read yourch
case $yourch in
1) clear
echo "Installing Fresh OpenMeetings 2.0 (Ubuntu)"
echo
sleep 1
echo "Creating Working Directory.................................................................."
echo
sleep 1
if ! [ -d "/usr/adm" ]; then
mkdir /usr/adm;
fi
sleep 5 && clear
echo "Updating Repos and Installing Java 6.............................................................................."
echo
sleep 1
apt-get update
apt-get install unzip subversion vim build-essential expect -y
cd /usr/adm
wget -c --no-cookies --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F" \
http://download.oracle.com/otn-pub/java/jdk/6u32-b05/jdk-6u32-linux-x64.bin
cd /usr/adm
chmod +x jdk-6u32-linux-x64.bin
./jdk-6u32-linux-x64.bin
mkdir -p /usr/lib/jvm
mv /usr/adm/jdk1.6.0_32 /usr/lib/jvm/
#expect Enter
update-alternatives --install /usr/bin/javac javac /usr/lib/jvm/jdk1.6.0_32/bin/javac 1
update-alternatives --install /usr/bin/java java /usr/lib/jvm/jdk1.6.0_32/bin/java 1
update-alternatives --install /usr/bin/javaws javaws /usr/lib/jvm/jdk1.6.0_32/bin/javaws 1
update-alternatives --config javac
update-alternatives --config java
update-alternatives --config javaws
sleep 5 && clear
echo "Installing OpenOffice and Build Tools............................................................................"
echo
sleep 1
apt-get install openoffice.org-writer openoffice.org-calc openoffice.org-impress -y
apt-get install openoffice.org-draw openoffice.org-math imagemagick sox -y
apt-get install libgif-dev xpdf libfreetype6 libfreetype6-dev libjpeg62 libjpeg8 -y
apt-get install libjpeg8-dev libjpeg-dev libdirectfb-dev -y
apt-get install libart-2.0-2 libt1-5 zip unzip bzip2 subversion git-core -y
apt-get install yasm texi2html libfaac-dev libfaad-dev libmp3lame-dev libsdl1.2-dev libx11-dev -y
apt-get install libxfixes-dev libxvidcore-dev zlib1g-dev libogg-dev sox libvorbis0a libvorbis-dev -y
apt-get install libgsm1 libgsm1-dev libfaad2 flvtool2 lame make g++ -y
sleep 5 && clear
echo "Installing SWFTools.............................................................................................."
echo
sleep 1
cd /usr/adm
wget -c http://www.swftools.org/swftools-2012-04-08-0857.tar.gz
tar -zxvf swftools-2012-04-08-0857.tar.gz
cd swftools-2012-04-08-0857
./configure
make
make install
sleep 5 && clear
echo "Installing FFMpeg..............................................................................................."
echo
sleep 1
cd /usr/adm
wget -c http://ffmpeg.org/releases/ffmpeg-0.11.1.tar.gz
tar -zxvf ffmpeg-0.11.1.tar.gz
cd ffmpeg-0.11.1
./configure --enable-libmp3lame --enable-libxvid --enable-libvorbis \
--enable-libgsm --enable-libfaac --enable-gpl --enable-nonfree
make
make install
sleep 5 && clear
echo "Installing MySQL Server...................................................................."
echo
sleep 1
debconf-set-selections <<< "mysql-server-5.1 mysql-server/root_password password $PASSWORD"
debconf-set-selections <<< "mysql-server-5.1 mysql-server/root_password_again password $PASSWORD"
apt-get -y install mysql-server
sleep 5 && clear
echo "Creating DB and setting Permissions......................................................."
echo
sleep 1
mysql -u root -p$PASSWORD -e "CREATE DATABASE openmeetings DEFAULT CHARACTER SET 'utf8';";
mysql -u root -p$PASSWORD -e "GRANT ALL PRIVILEGES ON openmeetings.* TO \"openmeetings\"@\"localhost\" IDENTIFIED BY \"$PASSWORD\" WITH GRANT OPTION;";
sleep 5 && clear
echo "Downloading and installing JODConverter......................................................"
echo
sleep 1
cd /usr/adm
wget -c http://jodconverter.googlecode.com/files/jodconverter-core-3.0-beta-4-dist.zip
unzip jodconverter-core-3.0-beta-4-dist.zip
sleep 5 && clear
echo "Installing ApacheAnt......................................................................."
echo
sleep 1
cd /usr/adm
wget -c http://www.trieuvan.com/apache//ant/binaries/apache-ant-1.9.1-bin.tar.gz
tar -zxvf apache-ant-1.9.1-bin.tar.gz
sleep 5 && clear
echo "Checking out OM2.0........................................................................."
echo
sleep 1
cd /usr/adm
svn checkout http://svn.apache.org/repos/asf/openmeetings/branches/2.1/
cd /usr/adm/2.1
sleep 5 && clear
echo "Compiling and Installing OM 2.1............................................................"
echo
sleep 1
/usr/adm/apache-ant-1.9.1/bin/ant clean.all
/usr/adm/apache-ant-1.9.1/bin/ant -Ddb=mysql
cd /usr/adm/2.1/dist
mv red5/ /usr/lib/
cp -R /usr/adm/jodconverter-core-3.0-beta-4 /usr/lib/red5/webapps/openmeetings
sleep 5 && clear
echo "Setting up permissions and creating start-up scripts......................................."
echo
sleep 1
chown -R nobody /usr/lib/red5
chmod +x /usr/lib/red5/red5.sh
chmod +x /usr/lib/red5/red5-debug.sh
rm -f /etc/init.d/red5
touch /etc/init.d/red5
echo '#! /bin/sh
### BEGIN INIT INFO
# Provides: red5
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts red5 server for Openmeetings.
### END INIT INFO
# For RedHat and cousins:
# chkconfig: 2345 85 85
# description: Red5 flash streaming server for OpenMeetings
# processname: red5
# Created By: Sohail Riaz ([email protected])
# Modified by Alvaro Bustos
PROG=red5
RED5_HOME=/usr/lib/red5
DAEMON=$RED5_HOME/$PROG.sh
PIDFILE=/var/run/$PROG.pid
[ -r /etc/sysconfig/red5 ] && . /etc/sysconfig/red5
RETVAL=0
case "$1" in
start)
cd $RED5_HOME
start-stop-daemon --start -c nobody --pidfile $PIDFILE \
--chdir $RED5_HOME --background --make-pidfile \
--exec $DAEMON >/dev/null 2>/dev/null &
RETVAL=$?
if [ $RETVAL -eq 0 ]; then
echo $! > $PIDFILE
fi
echo
;;
stop)
start-stop-daemon --stop --quiet --pidfile $PIDFILE \
--name java
rm -f $PIDFILE
echo
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/$PROG
;;
restart|force-reload)
$0 stop
$0 start
;;
status)
# Debian and Ubuntu 10 status check
ps aux | grep -f $PIDFILE >/dev/null 2>/dev/null && RETVAL=0 || RETVAL=3
# Ubuntu 12 status check using improved "start-stop-daemon" status query
# (use the above command, or comment out the above command and uncomment
# the two commands below.)
# start-stop-daemon --status --pidfile $PIDFILE
# RETVAL=$?
[ $RETVAL -eq 0 ] && echo "$PROG is running"
[ $RETVAL -eq 1 ] && echo "$PROG is not running and the pid file exists"
[ $RETVAL -eq 3 ] && echo "$PROG is not running"
[ $RETVAL -eq 4 ] && echo "$PROG - unable to determine status"
;;
checkports)
netstat -anp | grep soffice
netstat -anp | grep java
;;
*)
echo $"Usage: $0 {start|stop|restart|force-reload|status|checkports}"
RETVAL=1
esac
exit $RETVAL
' >> /etc/init.d/red5
chmod +x /etc/init.d/red5
update-rc.d red5 defaults
sleep 5 && clear
echo "Setting up Om to use MYSQL backend................................................"
echo
sleep 1
mv /usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/persistence.xml \
/usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/persistence.xml-ori
mv /usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/mysql_persistence.xml \
/usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/persistence.xml
sed -i "s/Username=root/Username=$USERNAME/g" /usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/persistence.xml
sed -i "s/Password=/Password=$PASSWORD/g" /usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/persistence.xml
/etc/init.d/red5 start
sleep 5 && clear
echo "Openmeetings is now installed please open a browser to:"
echo "http://<OMServerIPaddress>:5080/openmeetings/install"
echo
sleep 5
;;
2) clear
echo "Installing Fresh OpenMeetings 2.0 (Debian)"
echo
sleep 1
echo "Creating Working Directory.................................................................."
echo
sleep 1
if ! [ -d "/usr/adm" ]; then
mkdir /usr/adm;
fi
echo "Backing up sources.list and adding new repos................................................"
echo
sleep 1
mv /etc/apt/sources.list /etc/apt/sources.old
touch /etc/apt/sources.list
echo "deb http://security.debian.org/ wheezy/updates main contrib non-free" >> /etc/apt/sources.list
echo "deb-src http://security.debian.org/ wheezy/updates main contrib non-free" >> /etc/apt/sources.list
echo "deb http://ftp.debian.org/debian/ wheezy main contrib non-free" >> /etc/apt/sources.list
echo "deb-src http://ftp.debian.org/debian/ wheezy main contrib non-free" >> /etc/apt/sources.list
echo "deb-src http://ftp.debian.org/debian/ wheezy-updates main contrib non-free" >> /etc/apt/sources.list
echo "deb http://ftp2.de.debian.org/debian wheezy main non-free" >> /etc/apt/sources.list
echo "deb http://deb-multimedia.org wheezy main" >> /etc/apt/sources.list
apt-get update
apt-get install expect -y
sleep 5 && clear
echo "Installaing Java 6 (Sun)...................................................................."
echo
sleep 1
#apt-get install sun-java6-jdk -y
apt-get update
apt-get install unzip subversion vim build-essential expect -y
cd /usr/adm
wget -c --no-cookies --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F" http://download.oracle.com/otn-pub/java/jdk/6u32-b05/jdk-6u32-linux-x64.bin
cd /usr/adm
chmod +x jdk-6u32-linux-x64.bin
./jdk-6u32-linux-x64.bin
mkdir -p /usr/lib/jvm
mv /usr/adm/jdk1.6.0_32 /usr/lib/jvm/
#expect Enter
update-alternatives --install /usr/bin/javac javac /usr/lib/jvm/jdk1.6.0_32/bin/javac 1
update-alternatives --install /usr/bin/java java /usr/lib/jvm/jdk1.6.0_32/bin/java 1
update-alternatives --install /usr/bin/javaws javaws /usr/lib/jvm/jdk1.6.0_32/bin/javaws 1
update-alternatives --config javac
update-alternatives --config java
update-alternatives --config javaws
sleep 5 && clear
echo "Installing OpenOffice and needed pre-req's.................................................."
echo
sleep 1
apt-get install openoffice.org-writer openoffice.org-calc openoffice.org-impress \
openoffice.org-draw openoffice.org-math imagemagick gs-gpl -y
apt-get install libgif-dev xpdf php5 php5-mysql libfreetype6 libfreetype6-dev libjpeg8 libjpeg62 libjpeg8-dev -y
apt-get install g++ libjpeg-dev libdirectfb-dev libart-2.0-2 libt1-5 zip unzip bzip2 -y
apt-get install subversion git-core yasm texi2html libfaac-dev libfaad-dev -y
apt-get install libmp3lame-dev libsdl1.2-dev libx11-dev libxfixes-dev libxvidcore4-dev zlib1g-dev -y
apt-get install libogg-dev sox libvorbis0a libvorbis-dev libgsm1 libgsm1-dev libfaad2 subversion flvtool2 lame --force-yes -y
sleep 5 && clear
echo "Installing MySQL Server...................................................................."
echo
sleep 1
debconf-set-selections <<< "mysql-server-5.1 mysql-server/root_password password $PASSWORD"
debconf-set-selections <<< "mysql-server-5.1 mysql-server/root_password_again password $PASSWORD"
apt-get -y install mysql-server
sleep 5 && clear
echo "Creating DB and setting Permissions......................................................."
echo
mysql -u root -p$PASSWORD -e "CREATE DATABASE openmeetings DEFAULT CHARACTER SET 'utf8';";
mysql -u root -p$PASSWORD -e "GRANT ALL PRIVILEGES ON openmeetings.* TO \"openmeetings\"@\"localhost\" IDENTIFIED BY \"$PASSWORD\" WITH GRANT OPTION;";
sleep 5 && clear
echo "Compiling and installing SWFTools........................................................."
echo
sleep 1
wget -c http://www.swftools.org/swftools-2012-04-08-0857.tar.gz
tar -zxvf swftools-2012-04-08-0857.tar.gz
cd swftools-2012-04-08-0857
./configure
make
make install
sleep 5 && clear
echo "Compiling and installing FFMpeg..........................................................."
echo
sleep 1
cd /usr/adm
wget -c http://ffmpeg.org/releases/ffmpeg-0.11.1.tar.gz
tar -zxvf ffmpeg-0.11.1.tar.gz
cd ffmpeg-0.11.1
./configure --enable-libmp3lame --enable-libxvid --enable-libvorbis --enable-libgsm \
--enable-libfaac --enable-gpl --enable-nonfree
make
make install
sleep 5 && clear
echo "Downloading and installing JODConverter......................................................"
echo
sleep 1
cd /usr/adm
wget -c http://jodconverter.googlecode.com/files/jodconverter-core-3.0-beta-4-dist.zip
unzip jodconverter-core-3.0-beta-4-dist.zip
sleep 5 && clear
echo "Installing ApacheAnt......................................................................."
echo
sleep 1
cd /usr/adm
wget -c http://www.trieuvan.com/apache//ant/binaries/apache-ant-1.9.1-bin.tar.gz
tar -zxvf apache-ant-1.9.1-bin.tar.gz
sleep 5 && clear
echo "Checking out OM2.1........................................................................."
echo
sleep 1
cd /usr/adm
svn checkout http://svn.apache.org/repos/asf/openmeetings/branches/2.1/
cd /usr/adm/2.1
sleep 5 && clear
echo "Compiling and Installing OM 2.1............................................................"
echo
sleep 1
/usr/adm/apache-ant-1.9.1/bin/ant clean.all
/usr/adm/apache-ant-1.9.1/bin/ant -Ddb=mysql
cd /usr/adm/2.1/dist
mv red5/ /usr/lib/
cp -R /usr/adm/jodconverter-core-3.0-beta-4 /usr/lib/red5/webapps/openmeetings
sleep 5 && clear
echo "Setting up permissions and creating start-up scripts......................................."
echo
sleep 1
chown -R nobody /usr/lib/red5
chmod +x /usr/lib/red5/red5.sh
chmod +x /usr/lib/red5/red5-debug.sh
rm -f /etc/init.d/red5
touch /etc/init.d/red5
echo '#! /bin/sh
### BEGIN INIT INFO
# Provides: red5
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts red5 server for Openmeetings.
### END INIT INFO
# For RedHat and cousins:
# chkconfig: 2345 85 85
# description: Red5 flash streaming server for OpenMeetings
# processname: red5
# Created By: Sohail Riaz ([email protected])
# Modified by Alvaro Bustos
PROG=red5
RED5_HOME=/usr/lib/red5
DAEMON=$RED5_HOME/$PROG.sh
PIDFILE=/var/run/$PROG.pid
[ -r /etc/sysconfig/red5 ] && . /etc/sysconfig/red5
RETVAL=0
case "$1" in
start)
cd $RED5_HOME
start-stop-daemon --start -c nobody --pidfile $PIDFILE \
--chdir $RED5_HOME --background --make-pidfile \
--exec $DAEMON >/dev/null 2>/dev/null &
RETVAL=$?
if [ $RETVAL -eq 0 ]; then
echo $! > $PIDFILE
fi
echo
;;
stop)
start-stop-daemon --stop --quiet --pidfile $PIDFILE \
--name java
rm -f $PIDFILE
echo
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/$PROG
;;
restart|force-reload)
$0 stop
$0 start
;;
status)
# Debian and Ubuntu 10 status check
ps aux | grep -f $PIDFILE >/dev/null 2>/dev/null && RETVAL=0 || RETVAL=3
# Ubuntu 12 status check using improved "start-stop-daemon" status query
# (use the above command, or comment out the above command and uncomment
# the two commands below.)
# start-stop-daemon --status --pidfile $PIDFILE
# RETVAL=$?
[ $RETVAL -eq 0 ] && echo "$PROG is running"
[ $RETVAL -eq 1 ] && echo "$PROG is not running and the pid file exists"
[ $RETVAL -eq 3 ] && echo "$PROG is not running"
[ $RETVAL -eq 4 ] && echo "$PROG - unable to determine status"
;;
checkports)
netstat -anp | grep soffice
netstat -anp | grep java
;;
*)
echo $"Usage: $0 {start|stop|restart|force-reload|status|checkports}"
RETVAL=1
esac
exit $RETVAL
' >> /etc/init.d/red5
chmod +x /etc/init.d/red5
update-rc.d red5 defaults
sleep 5 && clear
echo "Setting up Om to use MYSQL backend................................................"
echo
sleep 1
mv /usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/persistence.xml \
/usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/persistence.xml-ori
mv /usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/mysql_persistence.xml \
/usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/persistence.xml
sed -i "s/Username=root/Username=$USERNAME/g" /usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/persistence.xml
sed -i "s/Password=/Password=$PASSWORD/g" /usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/persistence.xml
sed -i 's!<rtmpsslport>443</rtmpsslport>!<rtmpsslport>8443</rtmpsslport>!' /usr/lib/red5/webapps/openmeetings/config.xml
/etc/init.d/red5 start
sleep 5 && clear
echo "Openmeetings is now installed please open a browser to:"
echo "http://<OMServerIPaddress>:5080/openmeetings/install"
echo
echo "be sure and add these to firewall rules"
echo "#openmeetings"
echo "ACCEPT net fw tcp 5080"
echo "ACCEPT net fw tcp 1935"
echo "ACCEPT net fw tcp 8088"
sleep 25
exit 0
;;
3) clear
echo "Updating Openmeetings to latest Build...................................................."
echo
echo "Creating Working Directory.................................................................."
echo
sleep 1
if ! [ -d "/usr/adm" ]; then
mkdir /usr/adm;
fi
echo
echo "Stopping running OM Services..................................................................."
echo
sleep 1
/etc/init.d/red5 stop
echo
echo "Backing up current OM configuration............................................................"
echo
sleep 1
mv /usr/adm/backup_om.zip /usr/adm/backup_om.$(date -u +\%Y\%m\%d\%H\%M\%S).zip
cd /usr/lib/red5
./admin.sh -b -file /usr/adm/backup_om.zip
echo
echo "Dropping current Openmeetings Database........................................................."
echo
sleep 1
mysql -u root -p$PASSWORD -e "DROP DATABASE openmeetings;";
echo
echo "Creating DB and setting Permissions......................................................."
echo
sleep 1
mysql -u root -p$PASSWORD -e "CREATE DATABASE openmeetings DEFAULT CHARACTER SET 'utf8';";
mysql -u root -p$PASSWORD -e "GRANT ALL PRIVILEGES ON openmeetings.* TO \"openmeetings\"@\"localhost\" IDENTIFIED BY \"$PASSWORD\" WITH GRANT OPTION;";
sleep 5 && clear
echo "Archiving old OM Instance......................................................."
echo
sleep 1
mv /usr/lib/red5 /usr/lib/red5.$(date -u +\%Y\%m\%d\%H\%M\%S)
echo "Checking out Latest Build......................................................."
echo
sleep 1
cd /usr/adm
rm -Rf /usr/adm/singlewebapp
svn checkout http://svn.apache.org/repos/asf/incubator/openmeetings/trunk/singlewebapp/
sleep 5 && clear
echo "Installing ApacheAnt............................................................"
echo
sleep 1
cd /usr/adm
wget -c http://www.trieuvan.com/apache//ant/binaries/apache-ant-1.9.1-bin.tar.gz
tar -zxvf apache-ant-1.9.1-bin.tar.gz
sleep 5 && clear
echo "Compiling and Installing latest OpenMeetings Build.............................."
echo
sleep 1
cd /usr/adm/singlewebapp
/usr/adm/apache-ant-1.9.1/bin/ant clean.all
/usr/adm/apache-ant-1.9.1/bin/ant -Ddb=mysql
mv /usr/adm/singlewebapp/dist/red5/ /usr/lib/
cp -R /usr/adm/jodconverter-core-3.0-beta-4 /usr/lib/red5/webapps/openmeetings
sleep 5 && clear
echo "Setting permissions on OM Files................................................."
echo
sleep 1
chown -R nobody /usr/lib/red5
chmod +x /usr/lib/red5/red5.sh
chmod +x /usr/lib/red5/red5-debug.sh
echo "Setting up Om to use MYSQL backend.............................................."
echo
sleep 1
mv /usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/persistence.xml \
/usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/persistence.xml-ori
mv /usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/mysql_persistence.xml \
/usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/persistence.xml
sed -i "s/Username=root/Username=$USERNAME/g" /usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/persistence.xml
sed -i "s/Password=/Password=$PASSWORD/g" /usr/lib/red5/webapps/openmeetings/WEB-INF/classes/META-INF/persistence.xml
echo "Restoring OM Configuration....................................................."
echo
sleep 1
cd /usr/lib/red5
./admin.sh -i -file /usr/adm/backup_om.zip
sleep 5
echo "starting OM...................................................................."
echo
/etc/init.d/red5 start
sleep 10
sleep 5 && clear
echo "OpenMeetings has now been updated.............................................."
sleep 5
;;
4) exit 0;;
*) echo "Oopps!!! Please select choice 1, 2, 3 or 4";
echo "Press Enter to continue. . ." ; read ;;
esac
done
|
WebHostingCoopTeam/AdminScripts
|
openmeetings/Ubuntu_Debian_Installer_Upgrader-whc.sh
|
Shell
|
gpl-2.0
| 20,410 |
#!/bin/bash
if [ $# -lt 2 ]; then
echo "To few parametrs. Usage:"
echo "debpackage source_dir version"
exit 1
fi
source_dir=$1
deb_folder=/tmp/deb
mkdir -p $deb_folder
cp -r package/* $deb_folder
mkdir -p $deb_folder/usr/lib/x86_64-linux-gnu/perl5/5.20/nginx
mkdir -p $deb_folder/usr/sbin
mkdir -p $deb_folder/usr/share/perl5/5.20/
mkdir -p $deb_folder/etc/perl
mkdir -p $deb_folder/lib
cp $source_dir/conf/* $deb_folder/etc/nginx/
cp $source_dir/objs/nginx $deb_folder/usr/sbin/nginx
sed -i s/%%VERSION%%/$2/g $source_dir/src/http/modules/perl/nginx.pm
#cp $source_dir/objs/src/http/modules/perl/blib/arch/auto/nginx/nginx.so $deb_folder/lib || true:
cp $source_dir/src/http/modules/perl/nginx.pm $deb_folder/usr/share/perl5/5.20/ || :
cp $source_dir/src/http/modules/perl/nginx.pm $deb_folder/usr/lib/x86_64-linux-gnu/perl5/5.20/ || :
cp $source_dir/src/http/modules/perl/nginx.pm $deb_folder/usr/share/perl5/ || :
cp $source_dir/src/http/modules/perl/nginx.pm $deb_folder/etc/perl/ || :
fpm \
-s dir \
-t deb \
-n nginx \
-m "Marcin Kaciuba <[email protected]>" \
--vendor "Local compilation" \
--description "nginx web server" \
--license "nonfree" \
--url "https://mkaciuba.com" \
-p nginx-extras-amd64-${2}.deb \
-v ${2} \
-C $deb_folder \
-a all \
--conflicts "nginx-extras.deb, nginx-extras, nginx-common, nginx-full, nginx" \
--replaces "nginx-extras.deb, nginx-extras, nginx-common, nginx-full, nginx" \
-d libluajit-5.1-dev \
-d perl-base \
--pre-install scripts/preinst \
--post-install scripts/postinst
|
Aldor007/ngx_compile_script
|
debpackage.sh
|
Shell
|
gpl-2.0
| 1,583 |
#!/bin/sh
echo "------------------"
echo "| Running Tests |"
echo "------------------"
bundle exec autotest
|
e-jambon/odin_project
|
RspecPlayground/Links/runtest.sh
|
Shell
|
gpl-2.0
| 111 |
#!/bin/sh -e
# no options
# converts spice file to type-dependency graph
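# Usage sketch (hypothetical file name):
#   spice_type_deps_to_pdf.sh circuit.spi
# produces circuit.spi-dot (Graphviz source) and circuit.spi-pdf (rendered graph).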
for f
do
printf "Generating graph for $f-pdf ..."
spice_type_deps.awk $f | sort -u | deps_to_dot.awk > $f-dot
dot -Tpdf $f-dot -o $f-pdf
echo " done."
done
|
fangism/hackt
|
lib/scripts/spice_type_deps_to_pdf.sh
|
Shell
|
gpl-2.0
| 235 |
#!/bin/bash
## START CONFIGURATION
# Location where the .torrent files are stored locally
TORRENT_FILE_PATH='/home/tblyler/torrent_files'
# Location to initially download torrent data to from the remote SSH server
TORRENT_TMP_DOWNLOAD='/home/tblyler/torrents_tmp'
# Location to move the completed torrent data to from TORRENT_TMP_DOWNLOAD
TORRENT_DOWNLOAD='/home/tblyler/torrents'
# Amount of rsync processes to have running at one time
RSYNC_PROCESSES=2
# Location on the remote SSH server to copy the .torrent files to
SSH_SERVER_TORRENT_FILE_PATH='watch'
# Location on the remote SSH server where the torrent data is stored
SSH_SERVER_DOWNLOAD_PATH='files'
# Address of the remote SSH server where the torrents are downloaded
SSH_SERVER='remote.rtorrent.com'
# The username to use to login to the SSH server
SSH_USER='sshUserName'
# The XMLRPC basic HTTP authentication username
XML_USER='XMLRPCUserName'
# The XMLRPC basic HTTP authentication password
XML_PASS='XMLRPCPassword'
# The XMLRPC url
XML_URL='https://XMLRPCURL.com/XMLRPC'
## END CONFIGURATION
if ! which curl > /dev/null; then
echo 'curl must be installed'
exit 1
fi
if ! which scp > /dev/null; then
echo 'scp must be installed'
exit 1
fi
if ! which rsync > /dev/null; then
echo 'rsync must be installed'
exit 1
fi
if ! which python > /dev/null; then
if ! which python2 > /dev/null; then
echo 'python must be installed'
exit 1
fi
fi
# Hacky method to create the XML for an XMLRPC request to rtorrent
xml() {
local method=$1
local args=$2
echo "<?xml version='1.0'?>
<methodCall>
<methodName>${method}</methodName>
<params>
<param>
<value><string>${args}</string></value>
</param>
</params>
</methodCall>"
}
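# Illustration (assumed hash value): `xml d.get_name "ABC123"` emits a
# <methodCall> document whose single <string> parameter is "ABC123".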
# Returns the current entity and its content in an XML response
read_dom() {
local IFS=\>
read -d \< ENTITY CONTENT
}
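# Usage sketch: split an XML response into entity/content pairs, e.g.
#   echo '<string>demo</string>' | while read_dom; do
#     echo "entity=${ENTITY} content=${CONTENT}"
#   done
# prints "entity=string content=demo" among its lines.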
# Sends an XMLRPC request to rtorrent via curl and returns its data
xml_curl() {
local method=$1
local args=$2
local xml_post=`xml "${method}" "${args}"`
local curl_command='curl -s'
if [[ "${XML_USER}" != '' ]]; then
local curl_command="${curl_command} --basic -u '${XML_USER}"
if [[ "${XML_USER}" != '' ]]; then
local curl_command="${curl_command}:${XML_PASS}"
fi
local curl_command="${curl_command}'"
fi
local curl_command="${curl_command} -d \"${xml_post}\" '${XML_URL}'"
local xml_response=$(eval "${curl_command}")
local curl_return=$?
echo "${xml_response}"
return $curl_return
}
# Gets .torrent's name from the remote rtorrent XMLRPC
get_torrent_name() {
local torrent_hash=$1
local xml_response=`xml_curl d.get_name "${torrent_hash}"`
local curl_return=$?
if [[ "${curl_return}" -ne 0 ]]; then
echo "Curl failed to get torrent name with error code ${curl_return}"
return $curl_return
fi
local torrent_name=`echo "${xml_response}" | while read_dom; do
if [[ "${ENTITY}" = "name" ]] && [[ "${CONTENT}" = "faultCode" ]]; then
local error=true
fi
if [[ ! "${error}" ]] && [[ "${ENTITY}" = "string" ]]; then
echo "${CONTENT}"
fi
done`
if [[ "${torrent_name}" = '' ]]; then
echo "${xml_response}"
return 1
else
echo "${torrent_name}"
return 0
fi
}
# Get .torrent's completion status from the remote rtorrent
get_torrent_complete() {
local torrent_hash=$1
local xml_response=`xml_curl d.get_complete "${torrent_hash}"`
local curl_return=$?
if [[ "${curl_return}" -ne 0 ]]; then
echo "Curl failed to get torrent name with error code ${curl_return}"
return ${curl_return}
fi
local torrent_completed=`echo "${xml_response}" | while read_dom; do
if [[ "${ENTITY}" = "name" ]] && [[ "${CONTENT}" = "faultCode" ]]; then
local error=true
fi
if [[ ! "${error}" ]] && [[ "${ENTITY}" = "i8" ]]; then
echo "${CONTENT}"
fi
done`
if [[ "${torrent_completed}" = '' ]]; then
echo "${xml_response}"
return 1
else
echo "${torrent_completed}"
return 0
fi
}
# Check if a .torrent is loaded on the remote rtorrent
get_torrent_added() {
local torrent_hash=$1
local xml_response=`xml_curl d.get_complete "${torrent_hash}"`
local curl_return=$?
if [[ "${curl_return}" -ne 0 ]]; then
echo "Curl failed to get torrent name with error code ${curl_return}"
return ${curl_return}
fi
local torrent_added=`echo "${xml_response}" | while read_dom; do
if [[ "${CONTENT}" = 'Could not find info-hash.' ]]; then
echo "${CONTENT}"
fi
done`
if [[ "${torrent_added}" = '' ]]; then
echo 1
else
echo 0
fi
}
# Get the info hash for a given .torrent file
get_torrent_hash() {
local torrent_file=$1
if [[ ! -f "${torrent_file}" ]]; then
return 1
fi
local python_bin='python2'
if ! which "${python_bin}" > /dev/null 2>&1; then
local python_bin='python'
if ! which "${python_bin}" > /dev/null 2>&1; then
return 1
fi
fi
local torrent_hash=`"${python_bin}" - << END
import hashlib
def compute_hash(file_path):
try:
data = open(file_path, 'rb').read()
except:
return False
data_len = len(data)
start = data.find("infod")
if start == -1:
return False
start += 4
current = start + 1
dir_depth = 1
while current < data_len and dir_depth > 0:
if data[current] == 'e':
dir_depth -= 1
current += 1
elif data[current] == 'l' or data[current] == 'd':
dir_depth += 1
current += 1
elif data[current] == 'i':
current += 1
while data[current] != 'e':
current += 1
current += 1
elif data[current].isdigit():
num = data[current]
current += 1
while data[current] != ':':
num += data[current]
current += 1
current += 1 + int(num)
else:
return False
return hashlib.sha1(data[start:current]).hexdigest().upper()
print(compute_hash("${torrent_file}"))
END
`
if [[ $? -ne 0 ]] || [[ "${torrent_hash}" = 'False' ]]; then
return 1
fi
echo $torrent_hash
}
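# Usage sketch (hypothetical path): get_torrent_hash returns the uppercase
# SHA1 of the torrent's bencoded info dictionary, or non-zero on failure:
#   hash=$(get_torrent_hash "/path/to/file.torrent") || echo "bad torrent file"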
# keep track of the .torrent files to be downloaded
declare -A TORRENT_QUEUE
# keep track of the rsyncs to download torrent data
declare -A RUNNING_RSYNCS
# run indefinitely
while true; do
# check to make sure the path of the local .torrent files exists
if [[ ! -d "${TORRENT_FILE_PATH}" ]]; then
echo "${TORRENT_FILE_PATH} Does not exist"
exit 1
fi
OIFS="$IFS"
IFS=$'\n'
# enumerate the .torrent file directory
for file in `find "${TORRENT_FILE_PATH}"`; do
# check if the path is a directory
if [[ -d "${file}" ]]; then
# enumerate the directory
for sub_file in `find "${file}" -type f`; do
# this is the furthest we will descend
if [[ -f "${sub_file}" ]]; then
# get the torrent hash for the .torrent file
torrent_hash=`get_torrent_hash "${sub_file}"`
if [[ $? -ne 0 ]]; then
echo "Failed to get the torrent hash of ${sub_file}"
continue
fi
# add the torrent to the queue if it is not already in the queue
if [[ ! ${TORRENT_QUEUE[${torrent_hash}]+_} ]]; then
TORRENT_QUEUE[$torrent_hash]="${sub_file}"
fi
fi
done
# check that the path is a file
elif [[ -f "${file}" ]]; then
# get the torrent hash for the .torrent file
torrent_hash=`get_torrent_hash "${file}"`
if [[ $? -ne 0 ]]; then
echo "Failed to get the torrent hash of ${file}"
continue
fi
# add the torrent to the queue if it is not already in the queue
if [[ ! ${TORRENT_QUEUE[${torrent_hash}]+_} ]]; then
TORRENT_QUEUE[$torrent_hash]="${file}"
fi
fi
done
IFS="$OIFS"
# go through the torrent queue
for torrent_hash in "${!TORRENT_QUEUE[@]}"; do
# continue if the torrent is already being downloaded
if [[ ${RUNNING_RSYNCS[$torrent_hash]+_} ]]; then
continue
fi
# check to see if the torrent is on the rtorrent server
torrent_added=`get_torrent_added "${torrent_hash}"`
if [[ $? -ne 0 ]]; then
echo "Failed to see if ${TORRENT_QUEUE[$torrent_hash]} exists on the rtorrent server"
continue
fi
# if the torrent is not on the rtorrent server, upload it
if [[ $torrent_added -eq 0 ]]; then
scp "${TORRENT_QUEUE[$torrent_hash]}" "${SSH_USER}@${SSH_SERVER}:${SSH_SERVER_TORRENT_FILE_PATH}"
if [[ $? -ne 0 ]]; then
echo "Failed to upload ${TORRENT_QUEUE[$torrent_hash]}"
fi
fi
done
# if the amount of running rsyncs is below the desired amount, run items from the queue
for torrent_hash in "${!TORRENT_QUEUE[@]}"; do
# break out of the loop if we added enough jobs already
if [[ ${#RUNNING_RSYNCS[@]} -ge ${RSYNC_PROCESSES} ]]; then
break
fi
# make sure this torrent is not already being downloaded
if [[ ${RUNNING_RSYNCS[${torrent_hash}]+_} ]]; then
continue
fi
# see if the torrent is finished downloading remotely
torrent_completed=`get_torrent_complete "${torrent_hash}"`
if [[ $? -ne 0 ]]; then
echo "Failed to check if ${TORRENT_QUEUE[$torrent_hash]} is completed"
continue
fi
# the torrent is finished downloading remotely
if [[ "${torrent_completed}" -eq 1 ]]; then
torrent_name=`get_torrent_name "${torrent_hash}"`
if [[ $? -ne 0 ]]; then
echo "Failed to get torrent name for ${TORRENT_QUEUE[$torrent_hash]}"
continue
fi
# start the download and record the PID
echo "Started download for ${torrent_name} (${TORRENT_QUEUE[$torrent_hash]})"
rsync -hrvP --inplace "${SSH_USER}@${SSH_SERVER}:\"${SSH_SERVER_DOWNLOAD_PATH}/${torrent_name}\"" "${TORRENT_TMP_DOWNLOAD}/" > /dev/null &
RUNNING_RSYNCS[${torrent_hash}]=$!
fi
done
# checkup on the running rsyncs
for torrent_hash in "${!RUNNING_RSYNCS[@]}"; do
pid=${RUNNING_RSYNCS[$torrent_hash]}
# check to see if the given PID is still running
if ! kill -0 "${pid}" 2> /dev/null; then
# get the return code of the PID
wait $pid
return=$?
if [[ $return -eq 0 ]]; then
echo "Successfully downloaded ${TORRENT_QUEUE[$torrent_hash]}"
torrent_name=`get_torrent_name "${torrent_hash}"`
if [[ $? -eq 0 ]]; then
final_location_dir="${TORRENT_DOWNLOAD}"
if [[ `dirname "${TORRENT_QUEUE[$torrent_hash]}"` != "${TORRENT_FILE_PATH}" ]]; then
final_location_dir="${final_location_dir}/$(basename "`dirname "${TORRENT_QUEUE[$torrent_hash]}"`")"
fi
if [[ ! -d "${final_location_dir}" ]]; then
mkdir -p "${final_location_dir}"
fi
mv "${TORRENT_TMP_DOWNLOAD}/${torrent_name}" "${final_location_dir}/"
rm "${TORRENT_QUEUE[$torrent_hash]}"
unset TORRENT_QUEUE[$torrent_hash]
else
echo "Failed to get torrent name for ${TORRENT_QUEUE[$torrent_hash]}"
fi
else
echo "Failed to download ${TORRENT_QUEUE[$torrent_hash]} with rsync return code $return"
fi
unset RUNNING_RSYNCS[$torrent_hash]
fi
done
sleep 5s
done
|
Longcat00/hoarder
|
hoarder.sh
|
Shell
|
gpl-2.0
| 10,696 |
#!/bin/bash
# Set up a local virtual display of 2x2 quads.
# These have the following layout in terms of port numbers:
#
# #1 #2
# #0 #3 #6
# #5 #4
DIMENSIONS=-w160x90
# Each display has a separate server
./compute -s -p8080 -b -x160x90 $DIMENSIONS &
./compute -s -p8081 -b -x80x0 $DIMENSIONS &
./compute -s -p8082 -b -x240x0 $DIMENSIONS &
./compute -s -p8083 -b -x320x90 $DIMENSIONS &
./compute -s -p8084 -b -x240x180 $DIMENSIONS &
./compute -s -p8085 -b -x80x180 $DIMENSIONS &
./compute -s -p8086 -b -x0x90 $DIMENSIONS &
# Set up the client window. The file nodehex.txt defines the
# hostnames and port numbers for each remote display
./compute -c -nnodehex.txt
|
mistalro/gpufractal
|
app/configs/localhex.sh
|
Shell
|
gpl-2.0
| 697 |
#!/bin/bash
level=$1
gridloc=$2
STORM_NAME=$3
BASE=$4
BASIN=$5
WORKD=$BASE/outdat/$STORM_NAME
EXED=$BASE/exe
SHEL=$BASE/shells
INPD=$BASE/inputfiles
FCOD=$BASE/fcode
PYTH=$BASE/python_codes
yearmon=`echo $STORM_NAME | cut -c1-6`
cd $WORKD
lev=$level
echo $gridloc
dirn=$lev"-field"
cd $dirn
cp $INPD/grids/$gridloc/*$lev* .
/u/thesser1/anaconda/bin/python $PYTH/create_field_nc.py $yearmon $BASIN $lev
tarmax1=$STORM_NAME"_"$lev"_MMf.tgz"
tar -czf $tarmax1 wis*max_mean.nc
#rm wis*max_mean.nc
tarname1=$STORM_NAME"_"$lev"_field.tgz"
tar -czf $tarname1 wis*.nc
#tar -czf $tarname1 wis*.h5
mv *.tgz $WORKD
cd $WORKD
rm -rf $dirn
|
CHL-WIS/WIS_PAC
|
shells/ww3_make_nc_field.sh
|
Shell
|
gpl-2.0
| 627 |
#!/bin/sh -x
# Change daily snapshot into a backup or release.
# The first argument should be a version number or a date.
INSTALLDIR=/home/www/agbkb/forschung/formal_methods/CoFI/hets
if [ -z "$1" ] ; then
VERSION=`date +%F`
else
VERSION=$1
fi
case `uname -s` in
SunOS) TAR=gtar;;
*) TAR=tar;;
esac
for i in linux linux64 pc-solaris intel-mac
do
(cd $INSTALLDIR/$i; cp -p daily/hets.bz2 versions/hets-$VERSION.bz2)
done
(cd $INSTALLDIR/src-distribution; \
cp -p daily/Hets-src*.t* versions/Hets-src-$VERSION.tgz; \
cd versions; rm -rf Hets; \
$TAR zxf Hets-src-$VERSION.tgz)
# also unpack the new release as "recent overview of the modules"
|
nevrenato/Hets_Fork
|
utils/install.sh
|
Shell
|
gpl-2.0
| 656 |
function get_files
{
echo kdenlive.xml
}
function po_for_file
{
case "$1" in
kdenlive.xml)
echo kdenlive_xml_mimetypes.po
;;
esac
}
function tags_for_file
{
case "$1" in
kdenlive.xml)
echo comment
;;
esac
}
|
rugubara/kdenlive-15.08.1
|
data/XmlMessages.sh
|
Shell
|
gpl-2.0
| 279 |
#!/bin/bash
# Instantaneous date/time
DATE=$(date +%m%d)
TIME=$(date +%H%M)
START_TIME_SEC=$(date +%s)
# Using an uninitialized variable? Executed command throws an error? Quit
set -u
set -e
###############
# DEFINITIONS #
###############
# Toolchain paths
TOOLCHAIN=/home/vb/toolchain/arm-2011.03/bin
TOOLCHAIN_PREFIX=arm-none-linux-gnueabi-
STRIP=${TOOLCHAIN}/${TOOLCHAIN_PREFIX}strip
# Kernel version tag
KERNEL_VERSION="KGB Kernel for Samsung SCH-I500. Buildcode: $DATE.$TIME"
# Other paths
ROOTDIR=`pwd`
BUILDDIR=$ROOTDIR/build
WORKDIR=$BUILDDIR/bin
OUTDIR=$BUILDDIR/out
KERNEL_IMAGE=$ROOTDIR/arch/arm/boot/zImage
####################
# HELPER FUNCTIONS #
####################
echo_msg()
# $1: Message to print to output
{
echo "
*** $1 ***
"
}
makezip()
# $1: Name of output file without extension
# Creates $OUTDIR/$1.zip
{
echo "Creating: $OUTDIR/$1.zip"
pushd $WORKDIR/update-zip/META-INF/com/google/android > /dev/null
sed s_"\$DATE"_"$DATE"_ < updater-script > updater-script.tmp
mv -f updater-script.tmp updater-script
popd > /dev/null
pushd $WORKDIR/update-zip
zip -r -q "$1.zip" .
mv -f "$1.zip" $OUTDIR/
popd > /dev/null
}
makeodin()
# $1: Name of output file without extension
# Creates $OUTDIR/$1.tar.md5
{
echo "Creating: $OUTDIR/$1.tar.md5"
pushd $WORKDIR > /dev/null
tar -H ustar -cf "$1.tar" zImage
md5sum -t "$1.tar" >> "$1.tar"
mv -f "$1.tar" "$OUTDIR/$1.tar.md5"
popd
}
####################
# SCRIPT MAIN BODY #
####################
echo "Build script run on $(date -R)"
echo_msg "BUILD START: $KERNEL_VERSION"
# Clean kernel and old files
echo_msg "CLEANING FILES FROM PREVIOUS BUILD"
rm -rf $WORKDIR
rm -rf $OUTDIR
make CROSS_COMPILE=$TOOLCHAIN/$TOOLCHAIN_PREFIX clean mrproper
# Generate config
echo_msg "CONFIGURING KERNEL"
make ARCH=arm CROSS_COMPILE=$TOOLCHAIN/$TOOLCHAIN_PREFIX kgb_defconfig
# Generate initramfs
echo_msg "GENERATING INITRAMFS"
mkdir -p $WORKDIR
cp -rf $BUILDDIR/initramfs $WORKDIR/
# Make modules, strip and copy to generated initramfs
echo_msg "BUILDING MODULES"
make ARCH=arm CROSS_COMPILE=$TOOLCHAIN/$TOOLCHAIN_PREFIX modules
for line in `cat modules.order`
do
echo ${line:7}
cp -f ${line:7} $WORKDIR/initramfs/lib/modules/
$STRIP --strip-debug $WORKDIR/initramfs/lib/modules/$(basename $line)
done
# Replace source-built OneNAND driver with stock modules from EH03
cp -f $BUILDDIR/initramfs-EH03/lib/modules/dpram_atlas.ko $WORKDIR/initramfs/lib/modules/dpram_atlas.ko
cp -f $BUILDDIR/initramfs-EH03/lib/modules/dpram_recovery.ko $WORKDIR/initramfs/lib/modules/dpram_recovery.ko
# Remove unwanted initramfs files
rm -f $WORKDIR/initramfs/lib/modules/hotspot_event_monitoring.ko
# Write kernel version tag into initramfs root
echo $KERNEL_VERSION > $WORKDIR/initramfs/kernel_version
# Make kernel
echo_msg "BUILDING KERNEL"
make -j `expr $(grep processor /proc/cpuinfo | wc -l) + 1` \
ARCH=arm CROSS_COMPILE=$TOOLCHAIN/$TOOLCHAIN_PREFIX
# Create packages
echo_msg "CREATING CWM AND ODIN PACKAGES"
cp -rf $BUILDDIR/update-zip $WORKDIR/
cp -f $KERNEL_IMAGE $WORKDIR/update-zip/kernel_update/zImage
cp -f $KERNEL_IMAGE $WORKDIR/zImage
mkdir -p $OUTDIR
makezip "KGB-I500-$DATE.$TIME"
makeodin "KGB-I500-$DATE.$TIME"
# If you are not me, this ain't here kthx >;]
if [ -d /mnt/vbs ]; then
cp -f "$OUTDIR/KGB-I500-$DATE.$TIME.tar.md5" /mnt/vbs/
cp -f "$OUTDIR/KGB-I500-$DATE.$TIME.zip" /mnt/vbs/
makeodin "KGB-I500-$DATE"
fi
#######
# END #
#######
echo_msg "BUILD COMPLETE: $KERNEL_VERSION"
END_TIME_SEC=$(date +%s)
TIME_DIFF=$(($END_TIME_SEC - $START_TIME_SEC))
echo "Build script exiting on $(date -R). Elapsed time: $(($TIME_DIFF / 60))m$(($TIME_DIFF % 60))s"
exit
|
kangtastic/kgb
|
build.sh
|
Shell
|
gpl-2.0
| 3,646 |
#!/bin/sh
#
# Copyright 2003 Sun Microsystems, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
# CA 95054 USA or visit www.sun.com if you need additional information or
# have any questions.
#
#
#
#
if [ "${TESTSRC}" = "" ]
then
echo "TESTSRC not set. Test cannot execute. Failed."
exit 1
fi
echo "TESTSRC=${TESTSRC}"
if [ "${TESTJAVA}" = "" ]
then
echo "TESTJAVA not set. Test cannot execute. Failed."
exit 1
fi
echo "TESTJAVA=${TESTJAVA}"
if [ "${TESTCLASSES}" = "" ]
then
echo "TESTCLASSES not set. Test cannot execute. Failed."
exit 1
fi
echo "TESTCLASSES=${TESTCLASSES}"
echo "CLASSPATH=${CLASSPATH}"
JAVAC="${TESTJAVA}/bin/javac -g"
cp ${TESTSRC}/ExampleForClassPath.java ExampleForClassPath.java
${JAVAC} ExampleForClassPath.java
mkdir -p hidden
mv ExampleForClassPath.class hidden
rm -f ExampleForClassPath.java
|
TheTypoMaster/Scaper
|
openjdk/jdk/test/java/lang/instrument/AppendToClassPathSetUp.sh
|
Shell
|
gpl-2.0
| 1,692 |
#! /bin/sh
# Copyright (C) 2011-2013 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# parallel-tests support: redirection of file descriptors with
# AM_TESTS_FD_REDIRECT, for tests which are binary executables
# We use some tricks to ensure that all code paths in 'lib/am/check2.am'
# are covered, even on platforms where $(EXEEXT) would be naturally empty.
# See also the more generic test 'check-fd-redirect.sh', and
# sister test 'parallel-tests-fd-redirect.sh'.
required='cc native'
. test-init.sh
cat >> configure.ac << 'END'
AC_PROG_CC
# Calls like "write(9, ...)" are unlikely to work for MinGW-compiled
# programs. We must skip this test if this is the case.
am__ok=no
AC_LINK_IFELSE(
[AC_LANG_PROGRAM([[#include <unistd.h>]],
[[write (9, "foobar\n", 7); return 0;]])],
[AM_RUN_LOG([./conftest$EXEEXT 9>&1]) \
dnl Leading ":;" required to avoid having two nested subshells starting
dnl with '((' in the generated configure: that is unportable and could
dnl confuse some shells (e.g., NetBSD 5.1 /bin/ksh) into thinking we are
dnl trying to perform an arithmetic operation.
&& AM_RUN_LOG([:; (./conftest$EXEEXT 9>&1) | grep "^foobar"]) \
&& am__ok=yes])
test $am__ok = yes || AS_EXIT([63])
AM_CONDITIONAL([real_EXEEXT], [test -n "$EXEEXT"])
test -n "$EXEEXT" || EXEEXT=.bin
AC_OUTPUT
END
cat > Makefile.am << 'END'
AM_TESTS_FD_REDIRECT = 9>&1
TESTS = $(check_PROGRAMS)
check_PROGRAMS = baz qux.test
qux_test_SOURCES = zardoz.c
## Sanity check.
if !real_EXEEXT
check-local:
test -f baz.bin
test -f qux.test.bin
endif
END
$ACLOCAL
$AUTOCONF
$AUTOMAKE -a
cat > baz.c <<'END'
#include <stdio.h>
#include <unistd.h>
int main (void)
{
ssize_t res = write (9, " bazbazbaz\n", 11);
if (res < 0)
perror("write failed");
return res != 11;
}
END
cat > zardoz.c <<'END'
#include <stdio.h>
#include <unistd.h>
int main (void)
{
ssize_t res = write (9, " quxquxqux\n", 11);
if (res < 0)
perror("write failed");
return res != 11;
}
END
st=0; ./configure || st=$?
cat config.log # For debugging, as we do tricky checks in configure.
if test $st -eq 63; then
skip_ "fd redirect in compiled program unsupported"
elif test $st -eq 0; then
: Continue.
else
fatal_ "unexpected error in ./configure"
fi
# Sanity checks.
st=0
grep '^baz\.log:.*baz\$(EXEEXT)' Makefile || st=1
grep '^\.test\$(EXEEXT)\.log:' Makefile || st=1
grep '^qux\.log:' Makefile && st=1
test $st -eq 0 || fatal_ "doesn't cover expected code paths"
run_make -O -e IGNORE check
cat baz.log
cat qux.log
test $am_make_rc_got -eq 0
grep "^ bazbazbaz$" stdout
grep "^ quxquxqux$" stdout
$EGREP '(bazbazbaz|quxquxqux)' *.log && exit 1
:
|
sugarlabs/automake
|
t/parallel-tests-fd-redirect-exeext.sh
|
Shell
|
gpl-2.0
| 3,275 |
#! /bin/bash -e
# cvs-debi: Install current version of deb package
# cvs-debc: List contents of current version of deb package
#
# Based on debi/debc; see them for copyright information
# Based on cvs-buildpackage, copyright 1997 Manoj Srivastava
# (CVS Id: cvs-buildpackage,v 1.58 2003/08/22 17:24:29 srivasta Exp)
# This code is copyright 2003, Julian Gilbey <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
PROGNAME=`basename $0 .sh` # .sh for debugging purposes
usage () {
if [ "$PROGNAME" = cvs-debi ]; then usage_i
elif [ "$PROGNAME" = cvs-debc ]; then usage_c
else echo "Unrecognised invocation name: $PROGNAME" >&2; exit 1
fi;
}
usage_i () {
echo \
"Usage: $PROGNAME [options] [package ...]
Install the .deb file(s) just created by cvs-buildpackage or cvs-debuild,
as listed in the .changes file generated on that run. If packages are
listed, only install those specified binary packages from the .changes file.
Note that unlike cvs-buildpackage, the only way to specify the
source package name is with the -P option; you cannot simply have it
as the last parameter.
Also uses the cvs-buildpackage configuration files to determine the
location of the build tree, as described in the manpage.
Available options:
-M<module> CVS module name
-P<package> Package name
-V<version> Package version
-T<tag> CVS tag to use
-R<root dir> Root directory
-W<work dir> Working directory
-x<prefix> CVS default module prefix
-a<arch> Search for .changes file made for Debian build <arch>
-t<target> Search for .changes file made for GNU <target> arch
--help Show this message
--version Show version and copyright information
Other cvs-buildpackage options will be silently ignored."
}
usage_c () {
echo \
"Usage: $PROGNAME [options] [package ...]
Display the contents of the .deb file(s) just created by
cvs-buildpackage or cvs-debuild, as listed in the .changes file generated
on that run. If packages are listed, only display those specified binary
packages from the .changes file.
Note that unlike cvs-buildpackage, the only way to specify the
source package name is with the -P option; you cannot simply have it
as the last parameter.
Also uses the cvs-buildpackage configuration files to determine the
location of the build tree, as described in its manpage.
Available options:
-M<module> CVS module name
-P<package> Package name
-V<version> Package version
-T<tag> CVS tag to use
-R<root dir> Root directory
-W<work dir> Working directory
-x<prefix> CVS default module prefix
-a<arch> Search for .changes file made for Debian build <arch>
-t<target> Search for .changes file made for GNU <target> arch
--help Show this message
--version Show version and copyright information
Other cvs-buildpackage options will be silently ignored."
}
version () { echo \
"This is $PROGNAME, from the Debian devscripts package, version ###VERSION###
This code is copyright 2003, Julian Gilbey <[email protected]>,
all rights reserved.
Based on original code by Christoph Lameter and Manoj Srivastava.
This program comes with ABSOLUTELY NO WARRANTY.
You are free to redistribute this code under the terms of
the GNU General Public License, version 2 or later."
}
setq() {
# Variable Value Doc string
if [ "x$2" = "x" ]; then
echo >&2 "$progname: Unable to determine $3"
exit 1;
else
if [ ! "x$Verbose" = "x" ]; then
echo "$progname: $3 is $2";
fi
eval "$1=\"\$2\"";
fi
}
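# Usage sketch (illustrative values): assigns the named variable and, when
# $Verbose is set, logs the doc string:
#   setq "arch" "$(dpkg --print-architecture)" "build architecture"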
# Is cvs-buildpackage installed?
if ! command -v cvs-buildpackage >/dev/null 2>&1; then
echo "$PROGNAME: need the cvs-buildpackage package installed to run this" >&2
exit 1
fi
# Long term variables, which may be set in the cvsdeb config file or the
# environment:
# rootdir workdir (if all original sources are kept in one dir)
TEMPDIR=$(mktemp -dt cvs-debi.XXXXXXXX) || {
echo "$PROGNAME: unable to create temporary directory" >&2
echo "Aborting..." >&2
exit 1
}
TEMPFILE=$TEMPDIR/cl-tmp
trap "rm -f $TEMPFILE; rmdir $TEMPDIR" 0 1 2 3 7 10 13 15
TAGOPT=
# Command line; will bomb out if unrecognised options
TEMP=$(getopt -a -s bash \
-o hC:EH:G:M:P:R:T:U:V:W:Ff:dcnr:x:Bp:Dk:a:Sv:m:e:i:I:t: \
--long help,version,ctp,tC,sgpg,spgp,us,uc,op \
--long si,sa,sd,ap,sp,su,sk,sr,sA,sP,sU,sK,sR,ss,sn \
-n "$PROGNAME" -- "$@")
eval set -- $TEMP
while true ; do
case "$1" in
-h|--help) usage; exit 0 ; shift ;;
--version) version; exit 0 ; shift ;;
-M) opt_cvsmodule="$2" ; shift 2 ;;
-P) opt_package="$2" ; shift 2 ;;
-R) opt_rootdir="$2" ; shift 2 ;;
-T) opt_tag="$2" ; shift 2 ;;
-V) opt_version="$2" ; shift 2 ;;
-W) opt_workdir="$2" ; shift 2 ;;
-x) opt_prefix="$2" ; shift 2 ;;
-a) targetarch="$2" ; shift 2 ;;
-t) if [ "$2" != "C" ]; then targetgnusystem="$2"; fi
shift 2 ;;
# everything else is silently ignored
-[CHfGUr]) shift 2 ;;
-[FnE]) shift ;;
--ctp|--op|--tC) shift ;;
-[dDBbS]) shift ;;
-p) shift 2 ;;
--us|--uc|--sgpg|--spgp) shift ;;
--s[idapukrAPUKRns]) shift ;;
--ap) shift ;;
-[kvmeiI]) shift 2 ;;
--) shift ; break ;;
*) echo >&2 "Internal error! ($1)"
usage; exit 1 ;;
esac
done
if [ "x$opt_cvsmodule" = "x" -a "x$opt_package" = "x" -a \
! -e 'debian/changelog' ] ; then
echo >&2 "$progname should be run in the top working directory of"
echo >&2 "a Debian Package, or an explicit package (or CVS module) name"
echo >&2 "should be given."
exit 1
fi
if [ "x$opt_tag" != "x" ]; then
TAGOPT=-r$opt_tag
fi
# Command line, env variable, config file, or default
# This anomalous position is in case we need to check out the changelog
# below (anomalous since we have not loaded the config file yet)
if [ ! "x$opt_prefix" = "x" ]; then
prefix="$opt_prefix"
elif [ ! "x$CVSDEB_PREFIX" = "x" ]; then
prefix="$CVSDEB_PREFIX"
elif [ ! "x$conf_prefix" = "x" ]; then
prefix="$conf_prefix"
else
prefix=""
fi
# put a slash at the end of the prefix
if [ "X$prefix" != "X" ]; then
prefix="$prefix/";
prefix=`echo $prefix | sed 's://:/:g'`;
fi
if [ ! -f CVS/Root ]; then
if [ "X$CVSROOT" = "X" ]; then
echo "no CVS/Root file found, and CVSROOT var is empty" >&2
exit 1
fi
else
CVSROOT=$(cat CVS/Root)
export CVSROOT
fi
if [ "x$opt_package" = "x" ]; then
# Get the official package name and version.
if [ -f debian/changelog ]; then
# Ok, changelog exists
setq "package" \
"`dpkg-parsechangelog | sed -n 's/^Source: //p'`" \
"source package"
setq "version" \
"`dpkg-parsechangelog | sed -n 's/^Version: //p'`" \
"source version"
elif [ "x$opt_cvsmodule" != "x" ]; then
# Hmm. Well, see if we can checkout the changelog file
rm -f $TEMPFILE
cvs -q co -p $TAGOPT $opt_cvsmodule/debian/changelog > $TEMPFILE
setq "package" \
"`dpkg-parsechangelog -l$TEMPFILE | sed -n 's/^Source: //p'`" \
"source package"
setq "version" \
"`dpkg-parsechangelog -l$TEMPFILE | sed -n 's/^Version: //p'`" \
"source version"
rm -f "$TEMPFILE"
else
# Well. We don't know what this package is.
echo >&2 " This does not appear be a Debian source tree, since"
echo >&2 " theres is no debian/changelog, and there was no"
echo >&2 " package name or cvs module given on the comand line"
echo >&2 " it is hard to figure out what the package name "
echo >&2 " should be. I give up."
exit 1
fi
else
# The user knows best; package name is provided
setq "package" "$opt_package" "source package"
# Now, the version number
if [ "x$opt_version" != "x" ]; then
# All hail the user provided value
setq "version" "$opt_version" "source package"
elif [ -f debian/changelog ]; then
# Fine, see what the changelog says
setq "version" \
"`dpkg-parsechangelog | sed -n 's/^Version: //p'`" \
"source version"
elif [ "x$opt_cvsmodule" != "x" ]; then
# Hmm. The CVS module name is known, so let us try exporting the changelog
rm -f $TEMPFILE
cvs -q co -p $TAGOPT $opt_cvsmodule/debian/changelog > $TEMPFILE
setq "version" \
"`dpkg-parsechangelog -l$TEMPFILE | sed -n 's/^Version: //p'`" \
"source version"
rm -f "$TEMPFILE"
else
# Ok, try exporting the package name
rm -f $TEMPFILE
cvsmodule="${prefix}$package"
cvs -q co -p $TAGOPT $cvsmodule/debian/changelog > $TEMPFILE
setq "version" \
"`dpkg-parsechangelog -l$TEMPFILE | sed -n 's/^Version: //p'`" \
"source version"
rm -f "$TEMPFILE"
fi
fi
rm -f $TEMPFILE
rmdir $TEMPDIR
trap "" 0 1 2 3 7 10 13 15
non_epoch_version=$(echo -n "$version" | perl -pe 's/^\d+://')
upstream_version=$(echo -n "$non_epoch_version" | sed -e 's/-[^-]*$//')
debian_version=$(echo -n $non_epoch_version | perl -nle 'm/-([^-]*)$/ && print $1')
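# (Illustrative example, not taken from any changelog: a version of "1:2.10-3"
# yields non_epoch_version=2.10-3, upstream_version=2.10, debian_version=3.)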
# The default
if [ "X$opt_rootdir" != "X" ]; then
rootdir="$opt_rootdir"
else
rootdir='/usr/local/src/Packages'
fi
if [ "X$opt_workdir" != "X" ]; then
workdir="$opt_workdir"
else
workdir="$rootdir/$package"
fi
# Load site defaults and overrides.
if [ -f /etc/cvsdeb.conf ]; then
. /etc/cvsdeb.conf
fi
# Load user defaults and overrides.
if [ -f ~/.cvsdeb.conf ]; then
. ~/.cvsdeb.conf
fi
# Command line, env variable, config file, or default
if [ ! "x$opt_rootdir" = "x" ]; then
rootdir="$opt_rootdir"
elif [ ! "x$CVSDEB_ROOTDIR" = "x" ]; then
rootdir="$CVSDEB_ROOTDIR"
elif [ ! "x$conf_rootdir" = "x" ]; then
rootdir="$conf_rootdir"
fi
# Command line, env variable, config file, or default
if [ ! "x$opt_workdir" = "x" ]; then
workdir="$opt_workdir"
elif [ ! "x$CVSDEB_WORKDIR" = "x" ]; then
workdir="$CVSDEB_WORKDIR"
elif [ ! "x$conf_workdir" = "x" ]; then
workdir="$conf_workdir"
else
workdir="$rootdir/$package"
fi
if [ ! -d "$workdir" ]; then
echo >&2 "The working directory, $workdir, does not exist. Aborting."
if [ ! -d "$rootdir" ]; then
echo >&2 "The root directory, $rootdir, does not exist either."
fi
exit 1;
fi
# The next part is based on debi
setq arch "`dpkg-architecture -a${targetarch} -t${targetgnusystem} -qDEB_HOST_ARCH`" "build architecture"
pva="${package}_${non_epoch_version}_${arch}"
changes="$pva.changes"
cd $workdir || {
echo "Couldn't cd $workdir. Aborting" >&2
exit 1
}
if [ ! -r "$changes" ]; then
echo "Can't read $workdir/$changes! Have you built the package yet?" >&2
exit 1
fi
# Just call debc/debi respectively, now that we have a changes file
SUBPROG=${PROGNAME#cvs-}
exec $SUBPROG --check-dirname-level 0 $changes "$@"
|
nabetaro/devscripts-translation
|
scripts/cvs-debi.sh
|
Shell
|
gpl-2.0
| 11,798 |
#!/usr/bin/env bash
PROJECT_NAME="cmake"
CPLATFORM="ios"
CCPU="armeabi-v7a"
CCOMPILE="clang"
CVER="debug"
cd $(dirname $0)
mkdir -p ${PWD}/../../../build/${PROJECT_NAME}_${CPLATFORM}_${CCPU}_${CCOMPILE}_${CVER}/
cd ${PWD}/../../../build/${PROJECT_NAME}_${CPLATFORM}_${CCPU}_${CCOMPILE}_${CVER}/
rm -rf *
cmake -DCPLATFORM=${CPLATFORM} -DCCPU=${CCPU} -DCCOMPILE=${CCOMPILE} -DCVER=${CVER} -DBUILD_TESTS=ON -G"Xcode" -DCMAKE_BUILD_TYPE=${CVER} -DCMAKE_CONFIGURATION_TYPES=${CVER} -DCMAKE_TOOLCHAIN_FILE=${PWD}/../../${PROJECT_NAME}/toolchain/iOS.cmake ${PWD}/../../${PROJECT_NAME}
xcodebuild -project ${PROJECT_NAME}.xcodeproj -target ALL_BUILD -configuration ${CVER}
#cmake --build .
exit 0
|
liwangmj/make_guide
|
cmake/build/script/ios_armeabi-v7a_clang_debug.sh
|
Shell
|
gpl-2.0
| 695 |
#!/bin/bash
ansible-playbook -vvvvv -i localhost, site.yml -e region=us-east-1 -e global_region=us-west-2
|
alikins/ansible-bug-repro
|
var_include_23759/runme.sh
|
Shell
|
gpl-3.0
| 107 |
#!/bin/bash
./compile.sh --debug
cd ../www-client
./compile.sh --debug
cd ../server
export NODE_PATH=$NODE_PATH:app/app-lib
./run.sh
node --harmony app/scripts/debug/watch.js
|
prakhar1989/fora
|
server/debug.sh
|
Shell
|
gpl-3.0
| 175 |
#!/bin/bash
date=`date +%s`
tar -cvvjpf oacdp-overlay-$date.tar.bz2 out/5054part out/5867part out/ba4 out/damage out/ebersp out/images out/pdfs out/so42 out/solex out/tools out/wog69 out/wog72 out/zips
|
elliottjohnson/oacdp
|
scripts/generate-overlay.sh
|
Shell
|
gpl-3.0
| 204 |
export MAVEN_REPO=/Users/tryggvil/.maven/repository
java -classpath target/classes:$MAVEN_REPO/axis/jars/axis-1.3.jar:$MAVEN_REPO/axis/jars/axis-jaxrpc-1.3.jar:$MAVEN_REPO/commons-logging/jars/commons-logging-api-1.0.4.jar:$MAVEN_REPO/commons-discovery/jars/commons-discovery-0.2.jar:$MAVEN_REPO/axis/jars/axis-wsdl4j-1.5.1.jar:$MAVEN_REPO/axis/jars/axis-saaj-1.3.jar org.apache.axis.wsdl.WSDL2Java -o src/java src/xsd/Fasteignaskra.xsd
#java -jar $MAVEN_REPO/com.sun.xml.bind/jars/jaxb-xjc-2.0.1.jar -classpath $MAVEN_REPO/com.sun.xml.bind/jars/jaxb-impl-2.0.1.jar -o src/java src/xsd/Fasteignaskra.xsd
|
idega/is.fmr.landskra
|
generateFasteignaskraXsd.sh
|
Shell
|
gpl-3.0
| 605 |
export GOPATH=$GOPATH:/home/wenduowang/git/git_home/go
|
wenduowang/git_home
|
go/add_gopath.sh
|
Shell
|
gpl-3.0
| 55 |
#!/usr/bin/env bash
TESTER_BACKENDS=verilator sbt "test:runMain SingleCycle.Launcher $1"
|
zavs/ercesiMIPS
|
run-single.bak.v.sh
|
Shell
|
gpl-3.0
| 90 |
#!/bin/bash
CORPUSFOLDER=$HOME/corpusak/for_better_for_verse/
CORPUSREADERFOLDER=$HOME/language-technology/poetrycorpusreader/4B4V
#NOTE! REMEMBER "THE TYGER". THERE ARE SOME &s that should be replaced by words
rm -f corpusFile
touch corpusFile
for file in $CORPUSFOLDER/poems/*.xml
do
echo "$file";
python $CORPUSREADERFOLDER/newCorpusReader.py "$file" hunpos >> corpusFile;
done
|
manexagirrezabal/herascansion
|
script.sh
|
Shell
|
gpl-3.0
| 382 |
#!/bin/bash
TESTN=Test027
DATADIR=data
OUTDIR=output
SRCDIR=../src
diff $OUTDIR/Test023/lf_nMImat.out $DATADIR/test.lf_nMImat.out > $OUTDIR/$TESTN/$TESTN.log
|
jkleinj/GSAtools
|
tests/Test027.sh
|
Shell
|
gpl-3.0
| 159 |
check_pkg_moarvm() {
local br_version=$(grep -E "^${1^^}_VERSION = " package/${1}/${1}.mk | awk '{print $3}')
local version=$(wget -q -O - http://moarvm.com/releases/ | grep -o -E "[0-9]{4}\.[0-9]{2}" | head -n1)
if [[ "$br_version" != "$version" ]]; then
if [[ "$br_version" != "" ]] && [[ "$version" != "" ]]; then
packages="$packages $1"
br_versions="$br_versions $br_version"
versions="$versions $version"
else
echo "Warning: $1 code has a problem."
fi
fi
unset br_version
unset version
}
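# Usage sketch (illustrative, not part of the original script): the function
# expects a Buildroot tree with package/moarvm/moarvm.mk plus the accumulator
# variables set by the surrounding checker, e.g.:
#   packages=""; br_versions=""; versions=""
#   check_pkg_moarvm moarvm
#   [ -n "$packages" ] && echo "outdated:$packages ($br_versions ->$versions)"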
|
vriera/check-br-package-versions
|
packages/moarvm.sh
|
Shell
|
gpl-3.0
| 520 |
#!/bin/sh
java -cp "./@jar.name@:./lib/*" com.seyhanproject.pservice.Service
|
seyhanp/seyhan-pservice
|
script/run.sh
|
Shell
|
gpl-3.0
| 78 |
#!/bin/bash
set -x
###
# This script runs in batch j-dump sessions to collect result summaries from
# multiple jenkins jobs.
# It will produce as output two html files per job:
# - ${job}_builds.html
# - ${job}_failures.html
#
# The jobs are grouped under CI_NICKs labels. You can collect results under
# multiple CI_NICKs and from different Jenkins instances.
# For more info check jdump.cfg
###
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
CFG="jdcron.cfg"
if ! source $DIR/$CFG; then
echo "Cannot read config file in $CFG, exitting."
exit 1
fi
mkdir -p "$OUTPUT_DIR"
# wrap the code to be run exclusively with parentheses
(
# wait at most 10 seconds for lockfile (fd 200) to be released, if not exit 1
flock -x -w 10 200 || exit 1
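# (The lock itself: fd 200 is opened on $OUTPUT_DIR/jdcronlock by the
# redirection on the closing parenthesis of this subshell, so a second
# concurrent run fails the flock above and exits.)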
log() {
echo "$*" >> "$LOG_FILE"
}
index_html_heading() {
echo -e '<!DOCTYPE html>\n' \
'<html>\n' \
' <head>\n' \
' <style>\n' \
' * { font-family:arial, sans-serif; }\n' \
' header { padding-left:30px; font-size:20px; }\n' \
' nav { float:left; width:20%; padding-left:10px; }\n' \
' article { float:left; width:75%; padding-left:20px; }\n' \
' @media (max-width:800px) {\n' \
' nav,article { width:100%; height:auto; }\n' \
' }\n' \
' </style>\n' \
' </head>\n' \
" <link rel=\"icon\" href=\"$NM_ICON_FILE\">\n" \
' <body>\n' \
' <header>\n' \
" <h1><img style=\"width:235px; height:75px; padding-right:50px\" src=\"$NM_LOGOTYPE_FILE\" alt=\"NetworkManager\" align=\"bottom\">CI results</h1>\n" \
' </header>\n' \
' <section>\n' \
' <nav>\n' \
> $HTML_INDEX_FILE
}
js_heading() {
echo "var projects = [" > $JS_CONFIG_FILE
}
index_html_ci_begin() {
ci_nick="$1"
echo -e " <h2>${ci_nick}</h2>\n" \
' <ul style="list-style-type: none;">\n' \
>> $HTML_INDEX_FILE
}
index_html_ci_end() {
echo -e ' </ul>\n' >> $HTML_INDEX_FILE
}
index_html_add_entry() {
ref="$1_builds.html"
name="$2"
if [ "$3" == "green" ]; then
style="color:green; border:1px solid green; background-color:#ddffdd;"
elif [ "$3" == "black" ]; then
style="color:black; border:1px solid black; background-color:#dddddd;"
else
style="color:red; border:1px solid red; background-color:#ffdddd;"
fi
hl="$5"
unset health
if [ $hl -eq 1 ] || [ $hl -eq 0 ] ; then
health=$NM_HEALTH_FILE1
elif [ $hl -eq 2 ]; then
health=$NM_HEALTH_FILE2
elif [ $hl -eq 3 ]; then
health=$NM_HEALTH_FILE3
elif [ $hl -eq 4 ]; then
health=$NM_HEALTH_FILE4
elif [ $hl -eq 5 ]; then
health=$NM_HEALTH_FILE5
fi
if [ -n "$health" ]; then
health="<img style=\"width:20px; height:20px; padding-right:20px; margin-top:5px; margin-bottom:-5px;\" src=\"$health\">"
fi
echo -n " <li>${health}<a style=\"text-decoration:none; border-radius:2px; padding:0 3px; ${style}\" href=${ref} target=\"iframe_res\">${name}" >> $HTML_INDEX_FILE
for i in `seq $4`; do
echo -n " <b>*</b>" >> $HTML_INDEX_FILE
done
echo '</a></li>' >> $HTML_INDEX_FILE
}
js_add_entry() {
cat << EOF >> $JS_CONFIG_FILE
{
project:"$1",
name:"$2",
os:"$3",
},
EOF
}
index_html_trailing() {
echo -e ' </nav>\n' \
" <article><iframe name=\"iframe_res\" width=100% height=1000px style=\"border:none\">\n" \
' </section>\n' \
' </body>\n' \
'</html>' \
>> $HTML_INDEX_FILE
}
js_trailing() {
# end projects array, output health images names (image 0 and 1 are the same)
cat << EOF >> $JS_CONFIG_FILE
];
var health_img = [
"$NM_HEALTH_FILE1",
"$NM_HEALTH_FILE1",
"$NM_HEALTH_FILE2",
"$NM_HEALTH_FILE3",
"$NM_HEALTH_FILE4",
"$NM_HEALTH_FILE5",
];
EOF
}
process_job() {
local NICK="$1"
eval local CI_NICK_LABEL="\"\$${NICK}_CI_NICK_LABEL CI\""
eval local USER="\"\$${NICK}_USER\""
eval local PASSWORD="\"\$${NICK}_PASSWORD\""
eval local JENKINS_URL="\"\$${NICK}_JENKINS_URL\""
eval local JOB_HEADER="\"\$${NICK}_JOB_HEADER\""
eval local JOBS="\"\$${NICK}_JOBS\""
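# (Illustrative: for a nickname from CI_NICK_LIST, say NICK=UPSTREAM, the eval
# lines above resolve per-CI variables such as UPSTREAM_JENKINS_URL and
# UPSTREAM_JOBS defined in jdcron.cfg; the names here are hypothetical.)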
unset JDUMP_OPTIONS
[ -n "$USER" -a -n "$PASSWORD" ] && JDUMP_OPTIONS="--user $USER --password $PASSWORD"
log "*** $CI_NICK_LABEL ***"
index_html_ci_begin "$CI_NICK_LABEL"
for job in $JOBS
do
JOB_FULL_NAME="${JOB_HEADER}${job}"
[ -n "$JOB_HEADER" ] && JDUMP_JOB_NAME="--name ${job%-upstream}" || unset JDUMP_JOB_NAME
$JDUMP_BIN $JDUMP_OPTIONS $JDUMP_JOB_NAME "$JENKINS_URL" "$JOB_FULL_NAME" >> "$LOG_FILE" 2>&1
color="$(grep -v 'RUNNING' ${JOB_FULL_NAME}_builds.html | grep -m 1 '<tr><td>' | grep -o -e green -e black )"
running="$(grep -o 'RUNNING' ${JOB_FULL_NAME}_builds.html | wc -l)"
health="$(grep -v 'RUNNING' ${JOB_FULL_NAME}_builds.html | grep -m 5 '<tr><td>' |grep SUCCESS |wc -l) "
index_html_add_entry "$JOB_FULL_NAME" "${job%-upstream}" "$color" "$running" "$health"
js_add_entry "$JOB_FULL_NAME" "${job%-upstream}" "$CI_NICK_LABEL"
done
index_html_ci_end
}
[ -f "$NM_LOGOTYPE_FILE" ] && cp "$NM_LOGOTYPE_FILE" "$OUTPUT_DIR"
[ -f "$NM_ICON_FILE" ] && cp "$NM_ICON_FILE" "$OUTPUT_DIR"
[ -f "$NM_HEALTH_FILE1" ] && cp "$NM_HEALTH_FILE1" "$OUTPUT_DIR"
[ -f "$NM_HEALTH_FILE2" ] && cp "$NM_HEALTH_FILE2" "$OUTPUT_DIR"
[ -f "$NM_HEALTH_FILE3" ] && cp "$NM_HEALTH_FILE3" "$OUTPUT_DIR"
[ -f "$NM_HEALTH_FILE4" ] && cp "$NM_HEALTH_FILE4" "$OUTPUT_DIR"
[ -f "$NM_HEALTH_FILE5" ] && cp "$NM_HEALTH_FILE5" "$OUTPUT_DIR"
cd "$OUTPUT_DIR"
log "-----------------------------------------------------------------"
log `date`
log "-----------------------------------------------------------------"
index_html_heading
js_heading
for nick in $CI_NICK_LIST; do
process_job "$nick"
done
index_html_trailing
js_trailing
mv -f $OUTPUT_DIR/*.* $FINAL_DIR
cp -r $OUTPUT_DIR/cache $FINAL_DIR
[ "$?" = "0" ] && log "*** Success ***"
log "@@-------------------------------------------------------------@@"
) 200>$OUTPUT_DIR/jdcronlock
|
NetworkManager/NetworkManager-ci
|
run/utils/j-dump/jdcron.sh
|
Shell
|
gpl-3.0
| 5,941 |
class=$1
c=$2
loss=$3
hedges=$4
trainFile=svmlight/$class.train.svmlight$hedges
testFile=svmlight/$class.res.svmlight$hedges
modelFile=models/$class/SVMPerf.model.$loss.$c$hedges
outputFile=../data/res-and-qrels/results/$class/results.txt.SVMPerf.$loss.$c$hedges
svm_perf_learn -c $c -l $loss -w 3 --p 0.1 $trainFile $modelFile
svm_perf_classify $testFile $modelFile $outputFile
|
fmance/deep-medical-ir
|
classification/language-model/svmlearn.sh
|
Shell
|
gpl-3.0
| 381 |
#!/bin/sh
HERE="`dirname $0`"
SNF_VMCATCHER_HOME=${SNF_VMCATCHER_HOME:-$HERE}
java -jar $SNF_VMCATCHER_HOME/snf-vmcatcher.jar parse-image-list -image-list-url file:fedcloud.egi.eu-image.list
|
grnet/snf-vmcatcher
|
dev-parse-image-list.sh
|
Shell
|
gpl-3.0
| 193 |
#!/usr/bin/env bash
#
# Elite Dangerous data copier
####################################
# Enabling colours
RED=$(tput setaf 1)
GREEN=$(tput setaf 2)
YELLOW=$(tput setaf 3)
BLUE=$(tput setaf 4)
NORMAL=$(tput sgr0)
BRIGHT=$(tput bold)
# Folders
edlogtarget="${HOME}/elitedata/"
edlogtargetlog="${edlogtarget}logs/"
edlogtargetimg="${edlogtarget}images-todo/"
bkphost="raven.birdsnest.lan"
bkpfolder="/mnt/Backup/EliteDangerousFlightLogs/"
imgfolder='/Pictures/Frontier Developments/Elite Dangerous'
# Header
printf "${BLUE}..:: Elite Dangerous log & image copier ::..${NORMAL}\n"
# Create dir if it doesn't exist
if [[ ! -d "$edlogtarget" ]]
then
    mkdir -p ${edlogtarget}{logs,images-todo}
fi
# Step 1 - Check verbose logging
printf "\n${BLUE}Checking if verbose logging has been enabled: ${NORMAL}"
vlog=$( grep VerboseLogging '/Users/jgerritse/Library/Application Support/Steam/steamapps/common/Elite Dangerous/Products/FORC-FDEV-D-1010/EliteDangerous.app/Contents/Resources/AppConfig.xml' | wc -l )
if [[ "$vlog" -eq 1 ]]
then
printf "${GREEN}YES${NORMAL}\n"
elif [[ "$vlog" -eq 0 ]]
then
printf "${RED}NO${NORMAL}\n"
else
printf "${YELLOW}FAIL${NORMAL}\n"
fi
# Step 2 - Backup of the key bindings
# First, fetch the most recent backup file
latestkbbkp=$(ls -1rt ${edlogtarget}Custom.1.8.binds* | tail -n 1)
if [[ $(diff '/Users/jgerritse/Library/Application Support/Frontier Developments/Elite Dangerous/Options/Bindings/Custom.1.8.binds' ${latestkbbkp} | wc -l) -gt 0 ]]
then
    # Key bindings have changed. Time to back them up!
printf "\n${BLUE}Making a backup copy of the key bindings file: ${NORMAL}"
if cp '/Users/jgerritse/Library/Application Support/Frontier Developments/Elite Dangerous/Options/Bindings/Custom.1.8.binds' ${edlogtarget}Custom.1.8.binds-$(date +%y%m%d)
then
printf "${GREEN}OK${NORMAL}\n"
else
printf "${RED}FAIL${NORMAL}\n"
fi
fi
# Step 3 - Copy and process the logs
printf "\n${BLUE}Copy all logs to the elitelogs directory${NORMAL}\n"
# First delete the debug logs
find '/Users/jgerritse/Library/Application Support/Frontier Developments/Elite Dangerous/Logs/' -type f -name 'debugOutput*log' -delete
# Then copy the rest
rsync --archive --progress --exclude '*dmp' --exclude 'Config*' '/Users/jgerritse/Library/Application Support/Frontier Developments/Elite Dangerous/Logs/' $edlogtargetlog
# Make sure I keep my logs for safekeeping
printf "\n${BLUE}Upload logs and images to the backup folder on the SAN${NORMAL}\n"
rsync --archive --progress --exclude '.DS_Store' --exclude 'ZZ-Screenshot*' --delete $edlogtarget $bkphost:$bkpfolder
# Remove any files older than 30 days from the ED log dir
printf "\n${BLUE}Purge old logs from the Elite Dangerous log directory${NORMAL}\n"
if find '/Users/jgerritse/Library/Application Support/Frontier Developments/Elite Dangerous/Logs/' -type f -mtime +30 -delete
then
printf "${GREEN}OK${NORMAL}\n"
else
printf "${RED}FAIL${NORMAL}\n"
fi
# Step 4 - Copy the screenshots. They may need further processing / renaming
printf "\n${BLUE}Move new images to the elitelogs directory for further processing${NORMAL}\n"
rsync --archive --progress '/Users/jgerritse/Pictures/Frontier Developments/Elite Dangerous/' $edlogtargetimg
if [[ "$?" -eq 0 ]]
then
find '/Users/jgerritse/Pictures/Frontier Developments/Elite Dangerous/' -type f -name '*bmp' -delete
fi
# Converting the images
printf "\n${BLUE}Converting images to png${NORMAL}\n"
for image in $(find $edlogtargetimg -name '*bmp')
do
if [[ -z "$image" ]]
then
printf "Nothing to do.\n"
else
# Determine the filename
#imagename=$(basename $image | sed 's/\.[^.]*$//')
imagename="ZZ-Screenshot-$(date +%y%m%d-%H%M%S)"
printf " Processing $image: "
sips -s format png $image --out ${edlogtargetimg}${imagename}.png >/dev/null 2>&1
if [[ "$?" -eq 0 ]]
then
printf "${GREEN}OK${NORMAL}\n"
rm $image
else
printf "${RED}FAIL${NORMAL}\n"
fi
sleep 1 # In case processing took less than a second and the file is overwritten
fi
done
# Done
printf "\n${BLUE}DONE${NORMAL}\n"
|
tyhawk/tyhawk-shell
|
games/eddc.sh
|
Shell
|
gpl-3.0
| 4,074 |
#!/bin/sh
docker start docker-test
|
jianyingdeshitou/docker-debian8
|
docker-d8/test/start.sh
|
Shell
|
gpl-3.0
| 35 |
#!/bin/sh
cd ..
bin/rshell < Tests/cd_command.txt
|
elcain/rshell
|
tests/cd_command.sh
|
Shell
|
gpl-3.0
| 52 |
#!/bin/sh
bin=../src/apps
echo "*** dealer state odds, dynamic calculation, S17 ***"
$bin/bj-dealer-odds -n 1 -d -c -S
|
fangism/bj-jedi
|
test/dealer-odds-dynamic-composition-S17.sh
|
Shell
|
gpl-3.0
| 119 |
#!/bin/sh
#
# Slightly modified Oracle shutdown script
# it is meant to shut down Oracle services that systemd incorrectly thinks are stopped
# due to them being restarted manually
###
###################################
#
# usage: orashutdown $LISTENER_DB_SID
#
# Note:
# Use ORACLE_TRACE=T for tracing this script.
#
#####################################
if [ ! -f /etc/linearsoft/toolbag.conf ]; then
echo "Unable to locate LinearSoft toolbag conf file"
exit 1
fi
source /etc/linearsoft/toolbag.conf
source ${LSOFT_TOOLBAG_BASE}/oracle/systemd/init-db.sh
ORACLE_HOME_LISTNER=${ORACLE_HOME}
unset ORACLE_HOME
unset ORACLE_SID
# The following is used to bring down the Oracle Net Listener
if [ ! $ORACLE_HOME_LISTNER ] ; then
echo "ORACLE_HOME_LISTNER is not SET, unable to auto-stop Oracle Net Listener"
else
# Set the ORACLE_HOME for the Oracle Net Listener, it gets reset to
# a different ORACLE_HOME for each entry in the oratab.
ORACLE_HOME=$ORACLE_HOME_LISTNER ; export ORACLE_HOME
# Stop Oracle Net Listener
if [ -f $ORACLE_HOME_LISTNER/bin/tnslsnr ] ; then
ps -ef | grep -i "[t]nslsnr" > /dev/null
if [ $? -eq 0 ]; then
echo "$0: Stoping Oracle Net Listener"
$ORACLE_HOME_LISTNER/bin/lsnrctl stop
else
echo "Oracle Net Listener not running."
fi
else
echo "Failed to auto-stop Oracle Net Listener using $ORACLE_HOME_LISTNER/bin/tnslsnr"
fi
fi
# Set this in accordance with the platform
ORATAB=/etc/oratab
if [ ! $ORATAB ] ; then
echo "$ORATAB not found"
exit 1;
fi
# Stops an instance
stopinst() {
ORACLE_SID=`echo $LINE | awk -F: '{print $1}' -`
if [ "$ORACLE_SID" = '*' ] ; then
ORACLE_SID=""
fi
###Check if DB is running
ps -ef | grep -i "[p]mon_${ORACLE_SID}" > /dev/null
if [ ! $? -eq 0 ]; then
echo "${INST} \"${ORACLE_SID}\" not running."
return 1
fi
# Called programs use same database ID
export ORACLE_SID
ORACLE_HOME=`echo $LINE | awk -F: '{print $2}' -`
# Called scripts use same home directory
export ORACLE_HOME
# Put $ORACLE_HOME/bin into PATH and export.
PATH=$ORACLE_HOME/bin:${SAVE_PATH} ; export PATH
# add for bug 652997
LD_LIBRARY_PATH=${ORACLE_HOME}/lib:${SAVE_LLP} ; export LD_LIBRARY_PATH
PFILE=${ORACLE_HOME}/dbs/init${ORACLE_SID}.ora
# See if it is a V6 or V7 database
VERSION=undef
if [ -f $ORACLE_HOME/bin/sqldba ] ; then
SQLDBA=sqldba
VERSION=`$ORACLE_HOME/bin/sqldba command=exit | awk '
/SQL\*DBA: (Release|Version)/ {split($3, V, ".") ;
print V[1]}'`
case $VERSION in
"6") ;;
*) VERSION="internal" ;;
esac
else
if [ -f $ORACLE_HOME/bin/svrmgrl ] ; then
SQLDBA=svrmgrl
VERSION="internal"
else
SQLDBA="sqlplus /nolog"
fi
fi
case $VERSION in
"6") sqldba command=shutdown ;;
"internal") $SQLDBA <<EOF
connect internal
shutdown immediate
EOF
;;
*) $SQLDBA <<EOF
connect / as sysdba
shutdown immediate
quit
EOF
;;
esac
if test $? -eq 0 ; then
echo "${INST} \"${ORACLE_SID}\" shut down."
else
echo "${INST} \"${ORACLE_SID}\" not shut down."
fi
}
#
# Loop over every entry in the oratab file and try to shut down
# that ORACLE
#
cat $ORATAB | while read LINE
do
case $LINE in
\#*) ;; #comment-line in oratab
*)
ORACLE_SID=`echo $LINE | awk -F: '{print $1}' -`
if [ "$ORACLE_SID" = "" ]; then #Empty line
continue
fi
if [ "$ORACLE_SID" = '*' ] ; then
# NULL SID - ignore
ORACLE_SID=""
continue
fi
TMP=`echo $ORACLE_SID | cut -b 1`
if [ "${TMP}" != '+' ]; then
INST="Database instance"
ORACLE_HOME=`echo $LINE | awk -F: '{print $2}' -`
echo "Processing $INST \"$ORACLE_SID\""
stopinst
fi
;;
esac
done
#
# Following loop shuts down 'ASM Instance[s]'
#
cat $ORATAB | while read LINE
do
case $LINE in
\#*) ;; #comment-line in oratab
*)
ORACLE_SID=`echo $LINE | awk -F: '{print $1}' -`
if [ "$ORACLE_SID" = "" ]; then
continue
fi
if [ "$ORACLE_SID" = '*' ] ; then
# NULL SID - ignore
ORACLE_SID=""
continue
fi
TMP=`echo $ORACLE_SID | cut -b 1`
if [ "${TMP}" = '+' ]; then
INST="ASM instance"
ORACLE_HOME=`echo $LINE | awk -F: '{print $2}' -`
echo "Processing $INST \"$ORACLE_SID\""
stopinst
fi
;;
esac
done
|
LinearSoft/linux-toolbag
|
oracle/systemd/orashutdown.sh
|
Shell
|
gpl-3.0
| 4,359 |
gpio write 0 1 && gpio write 1 1 && gpio write 2 1
gpio write 3 1 && gpio write 4 1 && gpio write 5 1
gpio write 6 1 && gpio write 7 1 && gpio write 8 1
gpio write 9 1 && gpio write 10 1 && gpio write 11 1
|
darkgeej/terminal-menu
|
gpio/gpioaon.sh
|
Shell
|
gpl-3.0
| 210 |
#!/bin/sh
exec chrome --incognito --disable-infobars $@ &
# exec firefox $@ &
|
mekanix/bin
|
browser.sh
|
Shell
|
gpl-3.0
| 79 |
#! /bin/sh
PROG=./lecture-temperature.py
uptime
d0=$(date +%s)
while true
do
d=$(date +%s)
printf "%d\t" $((d-d0))
${PROG}
sleep 1
done
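# Illustrative output (the reading format depends on lecture-temperature.py):
# one line per second of the form "<elapsed-seconds>\t<reading>", e.g. "42\t21.5".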
|
GLMF/GLMFHS75
|
i2c_capteur/capture-temperatures.sh
|
Shell
|
gpl-3.0
| 145 |
#!/bin/sh
cobc -x -free -std=default -o bams bams.cbl createAuthCode.cbl
cobc -std=default -x -free ImportAttendees.cbl
cobc -std=default -x -free ExportAttendees.cbl
cobc -std=default -x -free BarnCampReport.cbl
|
mikebharris/BAMS
|
build.sh
|
Shell
|
gpl-3.0
| 214 |
#!/bin/bash
if [[ "$TRAVIS_PULL_REQUEST" == "false" && "$TRAVIS_BRANCH" == "master" ]]
then
curl -s -X POST -H "Content-Type: application/json" -H "Accept: application/json" -H "Travis-API-Version: 3" -H "Authorization: token $TRAVIS_TOKEN" -d '{ "request": { "branch":"master", "message": "Build Travis after changes in libamtrack/library" }}' https://api.travis-ci.com/repo/libamtrack%2FDockerFiles/requests
fi
|
libamtrack/library
|
distributions/JavaScript/trigger_docker_rebuild.sh
|
Shell
|
gpl-3.0
| 415 |
#! /bin/sh
# autogen for non-automake trees
#
# - it installs files: config.sub, config.guess, install-sh
# - it installs ltmain.sh, if LT_INIT or *LIBTOOL macro is used
#
set -e
USUAL_DIR="$1"
test -n "${USUAL_DIR}" || USUAL_DIR="."
test -f "${USUAL_DIR}/m4/usual.m4" || {
echo usage: $0 USUAL_DIR
exit 1
}
# default programs
ACLOCAL=${ACLOCAL:-aclocal}
AUTOCONF=${AUTOCONF:-autoconf}
AUTOHEADER=${AUTOHEADER:-autoheader}
# detect first glibtoolize then libtoolize
if test "x$LIBTOOLIZE" = "x"; then
LIBTOOLIZE=glibtoolize
which $LIBTOOLIZE >/dev/null 2>&1 \
|| LIBTOOLIZE=libtoolize
fi
#
# Workarounds for libtoolize randomness - it does not update
# the files if they exist, except it requires install-sh.
#
rm -f config.guess config.sub install-sh ltmain.sh libtool
cp -p ${USUAL_DIR}/mk/install-sh .
if ${LIBTOOLIZE} --help | grep "[-][-]install" > /dev/null; then
${LIBTOOLIZE} -i -f -q -c
else
${LIBTOOLIZE} -c
fi
# drop ltmain.sh if libtool is not used
grep -E 'LT_INIT|LIBTOOL' configure.ac > /dev/null \
|| rm -f ltmain.sh
# Now generate configure & config.h
${ACLOCAL} -I ${USUAL_DIR}/m4
grep AC_CONFIG_HEADER configure.ac > /dev/null \
&& ${AUTOHEADER}
${AUTOCONF}
# clean junk
rm -rf autom4te.* aclocal*
|
ycsoft/pgbouncer
|
lib/mk/std-autogen.sh
|
Shell
|
gpl-3.0
| 1,251 |
#!/bin/bash
# Copyright 2017 The bluemun Authors. All rights reserved.
# Use of this source code is governed by a GNU GENERAL PUBLIC LICENSE
# license that can be found in the LICENSE file.
running=0
function install {
go install github.com/bluemun/munfall
echo "engine installed"
go install github.com/bluemun/munfall/graphics
echo "engine/graphics installed"
go install github.com/bluemun/munfall/graphics/shader
echo "engine/graphics/shader installed"
go install github.com/bluemun/munfall/graphics/render
echo "engine/graphics/render installed"
go install github.com/bluemun/tetris
echo "Tetris installed"
}
while [ $running = 0 ]; do
echo -n "d: go get dependencies x: debug, b: install, r: run, a: install and run >"
read text
running=1
case $text in
"d" )
echo "Installing go-gl"
go get -u github.com/go-gl/gl/v{3.2,3.3,4.1,4.2,4.3,4.4,4.5}-{core,compatibility}/gl
echo "go-gl installed"
echo "Installing go-glfw"
go get -u github.com/go-gl/glfw/v3.2/glfw
echo "go-glfw installed"
echo "Installing go-mgl32"
go get github.com/go-gl/mathgl/mgl32
echo "go-mgl32 installed"
echo "Installing go-logging"
go get github.com/op/go-logging
echo "go-logging installed"
;;
"x" )
godebug run *.go
;;
"b" )
install;
;;
"r" )
$GOPATH/bin/tetris
;;
"a" )
install;
$GOPATH/bin/tetris
;;
* )
running=0
;;
esac
done
|
bluemun/tetris
|
dev.sh
|
Shell
|
gpl-3.0
| 1,495 |
#!/bin/bash
rm -f tmp/pids/server.pid
gem update bundler
bundle install
bundle update
yarn install
yarn upgrade
rails s -p 3000 -b '0.0.0.0'
|
alkcxy/BilancioFamiliare
|
entrypoint.sh
|
Shell
|
gpl-3.0
| 141 |
#!/bin/sh
# command line:
# sh create_LFMM_release.sh version_number
##################
# parameters #
##################
# name of the program
prog="LFMM"
# name of the directory to create
dir="LFMM_CL_v$1"
# list of the src directory to copy from replop/CL_code/code/src/
src_list="io lapack matrix LFMM convert stats tracyWidom pca"
# list of the executable to add
main_list="main_LFMM main_pca main_tracyWidom main_geno2lfmm main_lfmm2geno main_ped2lfmm main_vcf2geno main_ancestrymap2lfmm"
# color definition
VERT="\\033[1;32m" NORMAL="\\033[0;39m" ROUGE="\\033[1;31m" JAUNE="\\033[1;33m"
#############################
# creation of the directory #
#############################
echo "$VERT" "Creation of the directory $dir for $prog." "$NORMAL"
if [ -d $dir ]; then
echo "$ROUGE" "A directory called $dir already exists !"
echo "$ROUGE" "Creation Aborted." "$NORMAL"
exit 1
else
cp -r meta/$prog\_CL $dir
fi
echo "$VERT" "Entering $dir"
cd $dir
#############################
# add of the src files #
#############################
echo "\tAdd src files"
rm -r code/obj/*/
# create main
mkdir code/obj/main/
mkdir code/src/main/
# cp src files
for i in $src_list; do
echo "\t\tCopy of $i in $dir"
cp -r ../../../code/src/$i/ code/src/
mkdir code/obj/$i/
done
# cp main files
for i in $main_list; do
echo "\t\tCopy of $i in $dir"
cp ../../../code/src/main/$i.c code/src/main/
done
cp ../../../code/src/mainpage.h code/src/
##############################
# WARNING: UPDATE the README #
##############################
echo "$JAUNE" "\tWARNING: Do not FORGET to update the README files !!!"
#################################
# creation of the documentation #
#################################
echo "$VERT" "\tCompilation of the documentation" "$NORMAL"
cd ../../../documentation/$prog/obj/
rm note.pdf
pdflatex -interaction=batchmode note.tex
bibtex note.aux
pdflatex -interaction=batchmode note.tex
cp note.pdf ../note_$prog.pdf
cp ../note_$prog.pdf ../../../release/$prog/$dir/documentation/
cd ../../../release/$prog/$dir/
########################
# copy of the examples #
########################
echo "$VERT" "\tAdd examples files" "$NORMAL"
cp -r ../../../examples/LFMM/* examples/
echo "$VERT" "Leaving $dir" "$NORMAL"
cd ..
###########################
# creation of the archive #
###########################
echo "$VERT" "creation of the archive" "$NORMAL"
if [ -f $dir.tar.gz ]; then
echo "$ROUGE" "An archive called $dir.tar.gz already exists !"
echo "$ROUGE" "Creation Aborted." "$NORMAL"
rm -rf $dir
exit 1
else
tar -czf $dir.tar.gz $dir/
rm -rf $dir
fi
echo "$VERT" "Succesful creation of $prog release v$1" "$NORMAL"
|
frichote/replop
|
CL_code/release/LFMM/create_LFMM_release.sh
|
Shell
|
gpl-3.0
| 2,709 |
#!/bin/sh
./tetris_puzzle_solver -w 08 -h 8 -T 4 -J 4 -L 1 -O 3 -Z 1 -S 1 -I 2
./tetris_puzzle_solver -w 08 -h 6 -T 2 -J 3 -L 2 -O 2 -Z 2 -S 0 -I 1
./tetris_puzzle_solver -w 08 -h 6 -T 2 -J 2 -L 0 -O 2 -Z 3 -S 2 -I 1
./tetris_puzzle_solver -w 10 -h 4 -T 4 -J 1 -L 0 -O 0 -Z 2 -S 1 -I 2
./tetris_puzzle_solver -w 10 -h 4 -T 4 -J 0 -L 2 -O 2 -Z 0 -S 1 -I 1
./tetris_puzzle_solver -w 08 -h 5 -T 4 -J 1 -L 1 -O 0 -Z 2 -S 2 -I 0
./tetris_puzzle_solver -w 08 -h 5 -T 4 -J 1 -L 2 -O 1 -Z 0 -S 0 -I 2
./tetris_puzzle_solver -w 08 -h 5 -T 4 -J 1 -L 1 -O 0 -Z 2 -S 2 -I 0
./tetris_puzzle_solver -w 08 -h 5 -T 2 -J 1 -L 1 -O 2 -Z 1 -S 1 -I 2
|
mporsch/tetris_puzzle_solver
|
test.sh
|
Shell
|
gpl-3.0
| 632 |
if [ -z "$1" ]; then
PYTHON_VERSION=3
else
PYTHON_VERSION=$1
fi
apt-get update -qq
if [[ $(apt-cache search libsundials-serial | wc -l) -gt 0 ]]; then
SUNDIALS_BIN="libsundials-serial"
elif [[ $(apt-cache search libsundials-nvecserial2 | wc -l) -gt 0 ]] && [[ $(apt-cache search libsundials-cvode2 | wc -l) -gt 0 ]] && [[ $(apt-cache search libsundials-ida2 | wc -l) -gt 0 ]] ; then
SUNDIALS_BIN="libsundials-nvecserial2 libsundials-cvode2 libsundials-ida2"
else
SUNDIALS_BIN=""
fi
if [[ $(apt-cache search libsundials-serial-dev | wc -l) -gt 0 ]]; then
SUNDIALS_DEV="libsundials-serial-dev"
elif [[ $(apt-cache search libsundials-dev | wc -l) -gt 0 ]]; then
SUNDIALS_DEV="libsundials-dev"
else
SUNDIALS_DEV=""
fi
if [[ $(apt-cache search libatlas-dev | wc -l) -gt 0 ]]; then
ATLAS_DEV="libatlas-dev libatlas-base-dev"
elif [[ $(apt-cache search libatlas-base-dev | wc -l) -gt 0 ]]; then
ATLAS_DEV="libatlas-base-dev"
else
ATLAS_DEV=""
fi
if [[ "${PYTHON_VERSION}" == 2 ]]; then
PYTHON_PACKAGES="python-pip python-dev"
PIP_EXECUTABLE="pip2"
else
PYTHON_PACKAGES="python3-pip python3-dev"
PIP_EXECUTABLE="pip3"
fi
apt-get install -y libopenmpi-dev openmpi-bin \
${SUNDIALS_BIN} ${SUNDIALS_DEV} \
liblapack-dev libblas-dev ${ATLAS_DEV} \
${PYTHON_PACKAGES} make swig git
${PIP_EXECUTABLE} install -i https://pypi.python.org/simple pip --upgrade --ignore-installed
if [ -f /usr/bin/pip ]
then
mv /usr/bin/pip /usr/bin/pip.bak
fi
ln -s /usr/local/bin/pip /usr/bin/pip
if [ -f /usr/bin/${PIP_EXECUTABLE} ]
then
mv /usr/bin/${PIP_EXECUTABLE} /usr/bin/${PIP_EXECUTABLE}.bak
fi
ln -s /usr/local/bin/${PIP_EXECUTABLE} /usr/bin/${PIP_EXECUTABLE}
${PIP_EXECUTABLE} install setuptools --upgrade --ignore-installed
if [[ "${PYTHON_VERSION}" == 3 ]]; then
EASY_INSTALL=$(find /usr -name easy_install-3*)
if [[ -z "${EASY_INSTALL}" ]]; then
EASY_INSTALL="easy_install"
fi
elif [[ "${PYTHON_VERSION}" == 2 ]]; then
EASY_INSTALL=$(find /usr -name easy_install-2*)
if [[ -z "${EASY_INSTALL}" ]]; then
EASY_INSTALL="easy_install"
fi
else
EASY_INSTALL="easy_install"
fi
${EASY_INSTALL} --upgrade distribute
# Version incompatibility issue... Hopefully temporary
${PIP_EXECUTABLE} install pyopenssl
# Checking if mpicc is in /usr/bin
if [ ! -f /usr/bin/mpicc ] ; then
mpicc_path=$(echo $(find /usr -name mpicc) | cut -d' ' -f1)
ln -s ${mpicc_path} /usr/bin
fi
# Checking if mpirun is in /usr/bin
if [ ! -f /usr/bin/mpirun ] ; then
mpirun_path=$(echo $(find /usr -name mpirun) | cut -d' ' -f1)
ln -s ${mpirun_path} /usr/bin
fi
# Checking if libatlas is in /usr/lib
if [ ! -f /usr/lib/libatlas.so ] ; then
ATLAS_PATH=$(find /usr -name libatlas.so)
if [ -z "${ATLAS_PATH}" ] ; then
ATLAS_PATH=$(find /usr -name libtatlas.so)
fi
ln -s ${ATLAS_PATH} /usr/lib/libatlas.so
fi
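# Usage sketch (the argument is optional and defaults to Python 3):
#   sudo ./install_dep-debian.sh 2   # install the Python 2 toolchain instead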
|
vincent-noel/libSigNetSim
|
scripts/install_dep-debian.sh
|
Shell
|
gpl-3.0
| 2,995 |
#!/usr/bin/env bash
xgettext --files-from=po/POTFILES --directory=. --output=po/gahshomar.pot
msgmerge --update --no-fuzzy-matching --backup=off po/fa.po po/gahshomar.pot
|
Gahshomar/gahshomar
|
update_translations.sh
|
Shell
|
gpl-3.0
| 172 |
#!/bin/sh
#vcs=none
pkgname=icon-naming-utils
pkgver=0.8.90
urls="http://tango.freedesktop.org/releases/icon-naming-utils-${pkgver}.tar.bz2"
srctar=${pkgname}-${pkgver}.tar.bz2
relmon_id=1359
kiin_make() {
./configure --prefix=/usr \
--libexecdir=/usr/lib/icon-naming-utils
make
}
kiin_install() {
make DESTDIR=${pkgdir} install
}
|
alekseyrybalkin/kiin-repo
|
gui/icon-naming-utils/package.sh
|
Shell
|
gpl-3.0
| 354 |
sudo apt-get update
sudo rpi-update
sudo apt-get dist-upgrade
#Install apps
sudo apt-get install samba=2:3.6.6-6+deb7u5 \
samba-common-bin=2:3.6.6-6+deb7u5 \
nginx=1.2.1-2.2+wheezy3 \
libxml2-dev=2.8.0+dfsg1-7+wheezy3 \
mysql-server=5.5.41-0+wheezy1 \
mysql-client=5.5.41-0+wheezy1 \
php5=5.4.36-0+deb7u3 \
php5-common=5.4.36-0+deb7u3 \
php5-cgi=5.4.36-0+deb7u3 \
php5-mysql=5.4.36-0+deb7u3 \
php5-fpm=5.4.36-0+deb7u3 \
php5-mcrypt=5.4.36-0+deb7u3 \
php5-gd=5.4.36-0+deb7u3 \
ffmpeg=6:0.8.16-1+rpi1 \
zip=3.0-6 \
imagemagick=8:6.7.7.10-5+deb7u3 \
hostapd=1:1.0-3+deb7u1 \
udhcpd=1:1.20.0-7
#Update hostname
sudo cp /etc/hosts /etc/hosts.bak
sudo cp ./hostfiles/hosts /etc/hosts
sudo cp /etc/hostname /etc/hostname.bak
sudo cp ./hostfiles/hostname /etc/hostname
sudo /etc/init.d/hostname.sh
#Samba config
sudo cp /etc/samba/smb.conf /etc/samba/smb.conf.bak
sudo cp ./samba/smb.conf /etc/samba/smb.conf
sudo cp /etc/samba/dhcp.conf /etc/samba/dhcp.conf.bak
#sudo cp ./samba/dhcp.conf /etc/samba/dhcp.conf #This file no longer exists in the repo?
sudo cp /etc/samba/gdbcommands /etc/samba/gdbcommands.bak #Doesn't exist by default, but try to copy it anyways.
sudo cp ./samba/gdbcommands /etc/samba/gdbcommands
#Nginx config
sudo cp /etc/nginx/sites-enabled/default /etc/nginx/sites-enabled/default.bak
sudo cp ./nginx/sites-enabled/default /etc/nginx/sites-enabled/default
sudo cp /etc/php5/fpm/php-fpm.conf /etc/php5/fpm/php-fpm.conf.bak
sudo cp ./php5/fpm/php-fpm.conf /etc/php5/fpm/php-fpm.conf
sudo cp /etc/php5/fpm/php.ini /etc/php5/fpm/php.ini.bak
sudo cp ./php5/fpm/php.ini /etc/php5/fpm/php.ini
#Install base www files
sudo cp -r ./www/* /usr/share/nginx/www/
#Configure _h5ai
sudo mkdir /usr/share/nginx/www/_h5ai
sudo chmod 777 -R /usr/share/nginx/www/_h5ai/cache/
#Load the nginx config files
#(after this you should be able to visit the web server and see content)
sudo nginx -s reload
#Sphider
sudo chmod 777 -R /usr/share/nginx/www/rsphider
echo "CREATE DATABASE sphider_plus;" | mysql -u root -prachellovespie
sudo chmod 777 /usr/share/nginx/www/rsphider/admin/log
#Download all the content
sudo chmod +x ./scripts/encontent.sh
./scripts/encontent.sh
#Create wifi hotspot
sudo cp /etc/udhcpd.conf /etc/udhcpd.conf.bak #dhcp server
sudo cp ./hotspot/udhcpd.conf /etc/udhcpd.conf
sudo cp /etc/default/udhcpd /etc/default/udhcpd.bak
sudo cp ./hotspot/udhcpd /etc/default/udhcpd
sudo cp /etc/default/ifplugd /etc/default/ifplugd.bak #disable ifplugd
sudo cp ./hotspot/ifplugd /etc/default/ifplugd
sudo cp /etc/default/hostapd /etc/default/hostapd.bak
sudo cp ./hotspot/hostapd /etc/default/hostapd
sudo cp /etc/network/interfaces /etc/network/interfaces.bak
sudo cp ./hotspot/interfaces /etc/network/interfaces
sudo cp /etc/hostapd/hostapd.conf /etc/hostapd/hostapd.conf.bak #Does not exist, but attempt to copy anyways...
sudo cp ./hotspot/hostapd.conf /etc/hostapd/hostapd.conf
sudo cp /etc/sysctl.conf /etc/sysctl.conf.bak
sudo cp ./hotspot/sysctl.conf /etc/sysctl.conf
sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
sudo iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
sudo iptables -A FORWARD -i eth0 -o wlan0 -m state --state RELATED,ESTABLISHED -j ACCEPT
sudo iptables -A FORWARD -i wlan0 -o eth0 -j ACCEPT
sudo sh -c "iptables-save > /etc/iptables.ipv4.nat"
sudo ifconfig wlan0 10.10.10.10
sudo service hostapd start
sudo service udhcpd start
sudo update-rc.d hostapd enable
sudo update-rc.d udhcpd enable
#webshutdown
sudo chmod +x ./scripts/ifupdown.sh
./scripts/ifupdown.sh
|
srosro/rachelpiOS
|
scripts/rachelpios.sh
|
Shell
|
gpl-3.0
| 3,907 |
#!/bin/sh
#From http://richelbilderbeek.nl/CppQtCrosscompileToWindowsExample15.htm
echo "Cross compiling to Windows"
echo "1/2: Creating Windows makefile"
i686-pc-mingw32-qmake ToolTestFunctionParserDesktop.pro
echo "2/2: making makefile"
make
echo "Done"
|
richelbilderbeek/TestFunctionParser
|
crosscompiletowindows.sh
|
Shell
|
gpl-3.0
| 261 |
#!/bin/bash
~/hercules/tools/mob_db.pl < ~/hercules/db/pre-re/mob_db.txt > ~/hercules/tools/mob_db.sql
mysql -u hercules -pragnarok -h localhost << EOF
USE hercules;
source ~/hercules/tools/mob_db.sql
EOF
|
v4Lo/OnlyRO
|
mob_db_update.sh
|
Shell
|
gpl-3.0
| 205 |
source distro/mint/cinnamon/config
_ver="${_ver}/${_de}"
source distro/mint/install.sh
|
mytbk/liveusb-builder
|
distro/mint/cinnamon/install.sh
|
Shell
|
gpl-3.0
| 87 |
#!/bin/sh -
if [ $# -lt 1 ]; then
echo "Usage: $0 CONTAINER"
exit 1
fi
# wait until container is running
while ! docker ps | grep -q "$1[^/]"
do
echo "Waiting container $1..."
sleep 3
done
sleep 2
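# Example (hypothetical container name):
#   ./wait-container.sh my-app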
|
nuxeo/nuxeo.io
|
tools/wait-container.sh
|
Shell
|
lgpl-2.1
| 212 |
#!/usr/bin/env sh
# generated from catkin/python/catkin/environment_cache.py
# based on a snapshot of the environment before and after calling the setup script
# it emulates the modifications of the setup script without recurring computations
# new environment variables
export CATKIN_TEST_RESULTS_DIR="/opt/ros/hydro/stacks/qbo_object_recognition/build/test_results"
export ROS_TEST_RESULTS_DIR="/opt/ros/hydro/stacks/qbo_object_recognition/build/test_results"
# modified environment variables
export CMAKE_PREFIX_PATH="/opt/ros/hydro/stacks/qbo_object_recognition/build/devel:$CMAKE_PREFIX_PATH"
export CPATH="/opt/ros/hydro/stacks/qbo_object_recognition/build/devel/include:$CPATH"
export LD_LIBRARY_PATH="/opt/ros/hydro/stacks/qbo_object_recognition/build/devel/lib:$LD_LIBRARY_PATH"
export PATH="/opt/ros/hydro/stacks/qbo_object_recognition/build/devel/bin:$PATH"
export PKG_CONFIG_PATH="/opt/ros/hydro/stacks/qbo_object_recognition/build/devel/lib/pkgconfig:$PKG_CONFIG_PATH"
export PYTHONPATH="/opt/ros/hydro/stacks/qbo_object_recognition/build/devel/lib/python2.7/dist-packages:$PYTHONPATH"
export ROSLISP_PACKAGE_DIRECTORIES="/opt/ros/hydro/stacks/qbo_object_recognition/build/devel/share/common-lisp"
export ROS_PACKAGE_PATH="/opt/ros/hydro/stacks/qbo_object_recognition:$ROS_PACKAGE_PATH"
|
HailStorm32/Q.bo_stacks
|
qbo_object_recognition/build/catkin_generated/setup_cached.sh
|
Shell
|
lgpl-2.1
| 1,302 |
#!/bin/bash
set -eu
# EPEL
rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
# ClamAV
yum install -y clamav clamd
# take off firewall (local virtual machine - ok)
iptables -F
# listen to our local IP, not only on localhost
echo 'TCPAddr 192.168.50.72' >> /etc/clamd.conf
service clamd restart
|
solita/clamav-java
|
env/clamd.sh
|
Shell
|
lgpl-2.1
| 332 |
#!/bin/bash
# add 2017-03-06 by Rainer Gerhards, released under ASL 2.0
. $srcdir/diag.sh init
. $srcdir/diag.sh generate-conf
. $srcdir/diag.sh add-conf '
module(load="../plugins/imtcp/.libs/imtcp")
input(type="imtcp" port="13514" ruleset="ruleset")
ruleset(name="ruleset" parser="rsyslog.rfc5424") {
action(type="omfile" file="rsyslog2.out.log")
}
'
. $srcdir/diag.sh startup-vg
. $srcdir/diag.sh tcpflood -m10
. $srcdir/diag.sh shutdown-when-empty
. $srcdir/diag.sh wait-shutdown-vg
# note: we just check the valgrind output, the log file itself does not
# interest us
. $srcdir/diag.sh exit
|
shane-lawrence/rsyslog
|
tests/no-parser-vg.sh
|
Shell
|
lgpl-3.0
| 597 |
# Author: Daniel Ortiz Martínez
# *- bash -*
# Generates a corpus partition given a text file.
########
print_desc()
{
echo "thot_gen_partition written by Daniel Ortiz"
echo "thot_gen_partition generates a corpus partition given a text file"
echo "type \"thot_gen_partition --help\" to get usage information"
}
########
version()
{
echo "thot_gen_partition is part of the thot package"
echo "thot version "${version}
echo "thot is GNU software written by Daniel Ortiz"
}
########
usage()
{
echo "thot_gen_partition -s <string> t <string> -e <int> -n <int>"
echo " [--help] [--version]"
echo ""
echo "-s <string> File with source sentences"
echo "-t <string> File with target sentences"
echo "-e <int> Number of sentences to exclude from training"
echo "-n <int> Size of test and development corpora"
echo "--help Display this help and exit"
echo "--version Output version information and exit"
}
########
if [ $# -lt 1 ]; then
print_desc
exit 1
fi
# Read parameters
s_given=0
t_given=0
e_given=0
n_given=0
while [ $# -ne 0 ]; do
case $1 in
"--help") usage
exit 0
;;
"--version") version
exit 0
;;
"-s") shift
if [ $# -ne 0 ]; then
srcfile=$1
s_given=1
fi
;;
"-t") shift
if [ $# -ne 0 ]; then
trgfile=$1
t_given=1
fi
;;
"-e") shift
if [ $# -ne 0 ]; then
esize=$1
e_given=1
fi
;;
"-n") shift
if [ $# -ne 0 ]; then
dtsize=$1
n_given=1
fi
;;
esac
shift
done
# Check parameters
if [ ${s_given} -eq 0 ]; then
echo "Error! -s parameter not given" >&2
exit 1
else
if [ ! -f ${srcfile} ]; then
echo "Error! file ${srcfile} does not exist" >&2
exit 1
fi
fi
if [ ${t_given} -eq 0 ]; then
echo "Error! -t parameter not given" >&2
exit 1
else
if [ ! -f ${trgfile} ]; then
echo "Error! file ${trgfile} does not exist" >&2
exit 1
fi
fi
if [ ${e_given} -eq 0 ]; then
echo "Error! -e parameter not given" >&2
exit 1
fi
if [ ${n_given} -eq 0 ]; then
echo "Error! -n parameter not given" >&2
exit 1
fi
if [ `expr 2 \* ${dtsize}` -gt ${esize} ]; then
    echo "Error! 2 times the value of -n should not be greater than the value of -e" >&2
exit 1
fi
# Obtain number of lines for files
nlsrc=`wc -l $srcfile | $AWK '{printf"%s",$1}'`
nltrg=`wc -l $trgfile | $AWK '{printf"%s",$1}'`
if [ $nlsrc -ne $nltrg ]; then
echo "Error! source and target files have a different number of lines" >&2
exit 1
fi
nl=$nlsrc
if [ $nl -lt $esize ]; then
echo "Error! value of -e is greater than the number of available sentences" >&2
exit 1
fi
## Process parameters
# Shuffle corpus
${bindir}/thot_shuffle 31415 /tmp ${srcfile} > ${srcfile}.shuff
${bindir}/thot_shuffle 31415 /tmp ${trgfile} > ${trgfile}.shuff
# Generate training and excluded sentences files
trsize=`expr $nl - $esize`
$HEAD -$trsize ${srcfile}.shuff > ${srcfile}.train
$HEAD -$trsize ${trgfile}.shuff > ${trgfile}.train
$TAIL -$esize ${srcfile}.shuff > ${srcfile}.excluded
$TAIL -$esize ${trgfile}.shuff > ${trgfile}.excluded
# Generate dev partition
$HEAD -$dtsize ${srcfile}.excluded > ${srcfile}.dev
$HEAD -$dtsize ${trgfile}.excluded > ${trgfile}.dev
# Generate test partition
tmp=`expr 2 \* $dtsize`
$HEAD -$tmp ${srcfile}.excluded | $TAIL -$dtsize > ${srcfile}.test
$HEAD -$tmp ${trgfile}.excluded | $TAIL -$dtsize > ${trgfile}.test
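# Worked example (illustrative numbers): for a 10000-line corpus with -e 1000
# and -n 400, *.train holds the first 9000 shuffled lines and *.excluded the
# last 1000; *.dev is lines 1-400 of the excluded set, *.test lines 401-800.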
|
daormar/thot
|
utils/exper/thot_gen_partition.sh
|
Shell
|
lgpl-3.0
| 3,863 |
#!/bin/bash
printf "Waiting for PostgreSQL startup "
until pg_isready >> /dev/null; do
printf "."
sleep 1
done
sleep 2
echo -e "\nDone"
|
raphaelrpl/docker
|
scidb/16.9/files/wait_for_postgres.sh
|
Shell
|
lgpl-3.0
| 138 |
#!/bin/bash
# Usage:
# ./webUpdate.sh
function updt {
dir="./"
repo_url="https://github.com/robotenique/roboteniqueWeb"
repo_name="roboteniqueWeb/"
echo "****************************************************************************"
echo "Updating Repo $repo_name with url: $repo_url"
echo "Starting update..."
main_branch="master"
if [ "$repo_url" == "git@someserver:repo/repo.git" ]; then # if you have a repo where the primary branch isnt master
$main_branch="trunk"
fi
    # clone a fresh copy of the repo (no incremental update; git metadata is stripped below)
echo -e "\ncalling: git clone (roboteniqueWeb)"
(git clone $repo_url)
echo -e "\nremoving: .git and .gitignore"
(rm -rf roboteniqueWeb/.g*)
(tar -cvzpf tmp.tar.gz roboteniqueWeb/*)
(mv roboteniqueWeb/tmp.tar.gz ./tmp.tar.gz)
(rm -rf roboteniqueWeb/*)
(rmdir roboteniqueWeb)
echo -e "\nRemoving old files..."
(ls | egrep -v "(tmp.tar.gz|webUpdate.sh)+" | xargs rm -rf)
echo -e "\nExtracting new files..."
(tar -xvzpf tmp.tar.gz --strip-components 1)
(rm tmp.tar.gz)
echo "$repo_name have been updated!"
}
# Shell gotchas worth remembering:
# - variable assignment must NOT have spaces around '='
# - square brackets in 'if' tests DO require surrounding spaces
var=1
var2=42
if [ "$var" -eq $# ]; then
if [ $1 -eq 42 ]; then
updt
else
echo "ERROR! TRY AGAIN"
fi
fi
exit 0
|
robotenique/RandomAccessMemory
|
shellScripts/webUpdate.sh
|
Shell
|
unlicense
| 1,299 |
#!/bin/bash
./app --spring.profiles.active=staging
|
hexa1/gogo-groovy-spring-boot
|
run-stage.sh
|
Shell
|
unlicense
| 52 |
mkdir -p aarch64/gnupg
cp arch-update.sh aarch64/gnupg
cp fastmirror.sh aarch64/gnupg
cat <<DOCKER > aarch64/gnupg/Dockerfile
FROM build-clavator
COPY gnupg-builder.sh /
COPY arch-update.sh /
COPY fastmirror.sh /
CMD ["/bin/sh", "/gnupg-builder.sh"]
DOCKER
cat <<GNUPG > aarch64/gnupg/gnupg-builder.sh
rm -rf /clavator/aarch64/gnupg /clavator/aarch64/gnupg.done
mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc
echo -1 > /proc/sys/fs/binfmt_misc/aarch64
echo ':aarch64:M::\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xb7\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/bin/qemu-aarch64-static:' > /proc/sys/fs/binfmt_misc/register
mkdir -p /arch
wget $ARCHLINUXARM/os/ArchLinuxARM-odroid-c2-latest.tar.gz
bsdtar -xpf /ArchLinuxARM-odroid-c2-latest.tar.gz -C /arch
cp /usr/bin/qemu-aarch64-static /arch/usr/bin
cp /fastmirror.sh /arch
#cp /etc/pacman.d/mirrorlist /arch/etc/pacman.d
arch-chroot /arch /usr/bin/qemu-aarch64-static /bin/sh /fastmirror.sh
git clone https://github.com/mabels/gnupg.git -b quick-keytocard /arch/gnupg
cd /arch/gnupg && git rev-parse --verify --short HEAD > VERSION
cd /arch/gnupg && sh ./autogen.sh
cp /arch-update.sh /arch
arch-chroot /arch /usr/bin/qemu-aarch64-static /bin/sh /arch-update.sh
arch-chroot /arch /usr/bin/qemu-aarch64-static /usr/bin/pacman --noconfirm -Sy imagemagick mesa-libgl librsvg fig2dev ghostscript texinfo
arch-chroot /arch /usr/bin/qemu-aarch64-static /bin/sh -c \
"cd /gnupg && ./configure --sysconfdir=/etc --enable-maintainer-mode && make"
mkdir -p /clavator/aarch64
cp -pr /arch/gnupg /clavator/aarch64
touch /clavator/aarch64/gnupg.done
GNUPG
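# (Note: the <<GNUPG heredoc above is unquoted, so $ARCHLINUXARM is expanded
# here when the wrapper runs, not inside the container; it is assumed to be
# set in the calling environment to an Arch Linux ARM mirror URL.)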
docker build -t build-gnupg-aarch64 aarch64/gnupg
docker run -d --privileged \
-v /var/cache/docker/clavator:/clavator \
-t build-gnupg-aarch64
|
mabels/clavator
|
image/build-gnupg-aarch64.sh
|
Shell
|
apache-2.0
| 1,840 |
#!/bin/bash
set -e -x
export ETCD_PORT=${ETCD_PORT:-$ETCD_1_PORT_4001_TCP_PORT}
export ETCD_HOST=${ETCD_HOST:-$ETCD_1_PORT_4001_TCP_ADDR}
export ETCD=http://$ETCD_HOST:$ETCD_PORT
echo "[nginx] booting container. ETCD: $ETCD"
echo "starting supervisor in foreground"
supervisord -e debug
|
lewg/docker-nginx-confd
|
run-debug.sh
|
Shell
|
apache-2.0
| 290 |
#!/bin/bash
set -e
# initialization
initialize_migrator() {
# sets colors for use in output
GREEN='\e[32m'
BLUE='\e[34m'
YELLOW='\e[0;33m'
RED='\e[31m'
BOLD='\e[1m'
CLEAR='\e[0m'
# pre-configure ok, warning, and error output
OK="[${GREEN}OK${CLEAR}]"
INFO="[${BLUE}INFO${CLEAR}]"
NOTICE="[${YELLOW}!!${CLEAR}]"
ERROR="[${RED}ERROR${CLEAR}]"
# trap for ctrl+c
# trap 'catch_error User exited' SIGINT
# set default error action to prompt if none provided
ERROR_ACTION=${ERROR_ACTION:-prompt}
# set default to prompt user for validation
USER_PROMPT=${USER_PROMPT:-true}
# set default to require docker login
NO_LOGIN=${NO_LOGIN:-false}
# Default is to require curl to perform certificate validation
USE_INSECURE_CURL=${USE_INSECURE_CURL:-false}
  [ "${USE_INSECURE_CURL}" == "true" ] && INSECURE_CURL='-k' || INSECURE_CURL=''
}
# verify requirements met for script to execute properly
verify_ready() {
# verify v1 registry variable has been passed
if [ -z "${V1_REGISTRY}" ]
then
catch_error "${BOLD}V1_REGISTRY${CLEAR} environment variable required"
fi
# verify v2 registry variable has been passed
if [ -z "${V2_REGISTRY}" ]
then
catch_error "${BOLD}V2_REGISTRY${CLEAR} environment variable required"
fi
# verify valid error action
if [ "${ERROR_ACTION}" != "prompt" ] && [ "${ERROR_ACTION}" != "retry" ] && [ "${ERROR_ACTION}" != "skip" ] && [ "${ERROR_ACTION}" != "abort" ]
then
catch_error "${BOLD}ERROR_ACTION${CLEAR} environment variable (${ERROR_ACTION}) invalid; must be one of the following: ${BOLD}prompt${CLEAR}, ${BOLD}retry${CLEAR}, ${BOLD}skip${CLEAR}, or ${BOLD}abort${CLEAR}"
fi
# verify valid user prompt variable
if [ "${USER_PROMPT}" != "true" ] && [ "${USER_PROMPT}" != "false" ]
then
catch_error "${BOLD}USER_PROMPT${CLEAR} environment variable (${USER_PROMPT}) invalid; must be either ${BOLD}true${CLEAR} or ${BOLD}false${CLEAR}"
fi
# verify docker daemon is accessible
if ! $(docker info > /dev/null 2>&1)
then
catch_error "Docker daemon not accessible. Is the Docker socket shared into the container as a volume?"
fi
}
# generic error catching
catch_error(){
echo -e "\n${ERROR} ${@}"
echo -e "${ERROR} Migration from v1 to v2 failed!"
exit 1
}
# catch push/pull error
catch_push_pull_error() {
# set environment variables to handle arguments
ACTION="${1}"
IMAGE="${2}"
TEMP_ERROR_ACTION=${3:-${ERROR_ACTION}}
# perform action based off of error action
case $TEMP_ERROR_ACTION in
prompt)
# prompt user for course of action
echo -e "${ERROR} Failed to ${ACTION} ${IMAGE}"
echo -en "\n${NOTICE} "
read -rp $"Retry, skip, or abort? {r|s|a} " RESPONSE; echo
# act based on user response
case ${RESPONSE} in
r|R)
# re-run function with retry
catch_push_pull_error "${ACTION}" "${IMAGE}" "retry"
;;
s|S)
# re-run function with skip
catch_push_pull_error "${ACTION}" "${IMAGE}" "skip"
;;
a|A)
# re-run function with abort
catch_push_pull_error "${ACTION}" "${IMAGE}" "abort"
;;
*)
# invalid user response; re-run function with prompt
echo -e "\n${ERROR} Invalid response"
catch_push_pull_error "${ACTION}" "${IMAGE}" "prompt"
;;
esac
;;
retry)
# run push or pull again
echo -e "${ERROR} Failed to ${ACTION} ${IMAGE}; retrying\n"
push_pull_image "${ACTION}" "${IMAGE}"
;;
skip)
# skip push or pull and proceeed
echo -e "${ERROR} Failed to ${ACTION} ${IMAGE}; skipping\n"
;;
abort)
# abort and exit migration
catch_error "Failed to ${ACTION} ${IMAGE}; aborting"
;;
esac
}
# catch retag error
catch_retag_error() {
# set environment variables to handle arguments
SOURCE_IMAGE="${1}"
DESTINATION_IMAGE="${2}"
TEMP_ERROR_ACTION=${3:-${ERROR_ACTION}}
# perform action based off of error action
case $TEMP_ERROR_ACTION in
prompt)
# prompt user for course of action
echo -e "${ERROR} Failed to retag ${SOURCE_IMAGE} > ${DESTINATION_IMAGE}"
echo -en "\n${NOTICE} "
read -rp $"Retry, skip, or abort? {r|s|a} " RESPONSE; echo
# act based on user response
case ${RESPONSE} in
r|R)
# re-run function with retry
catch_retag_error "${SOURCE_IMAGE}" "${DESTINATION_IMAGE}" "retry"
;;
s|S)
# re-run function with skip
catch_retag_error "${SOURCE_IMAGE}" "${DESTINATION_IMAGE}" "skip"
;;
a|A)
# re-run function with abort
catch_retag_error "${SOURCE_IMAGE}" "${DESTINATION_IMAGE}" "abort"
;;
*)
# invalid user response; re-run function with prompt
echo -e "\n${ERROR} Invalid response"
catch_retag_error "${SOURCE_IMAGE}" "${DESTINATION_IMAGE}" "prompt"
;;
esac
;;
retry)
# run retag again
echo -e "${ERROR} Failed to retag ${IMAGE}; retrying\n"
retag_image "${SOURCE_IMAGE}" "${DESTINATION_IMAGE}"
;;
skip)
# skip retag and proceed
echo -e "${ERROR} Failed to retag ${IMAGE}; skipping\n"
;;
abort)
# abort and exit migration
catch_error "Failed to retag ${IMAGE}; aborting"
;;
esac
}
# perform a docker login
docker_login() {
REGISTRY="${1}"
USERNAME="${2}"
PASSWORD="${3}"
EMAIL="${4}"
if [ -n "${USERNAME}" ] && [ -n "${PASSWORD}" ] && [ -n "${EMAIL}" ]
then
# docker login with credentials provided
docker login --username="${USERNAME}" --password="${PASSWORD}" --email="${EMAIL}" ${REGISTRY} || catch_error "Failed to login using provided credentials"
else
# prompt for credentials for docker login
echo -e "${NOTICE} Please login to ${REGISTRY}:"
LOGIN_SUCCESS="false"
# keep retrying docker login until successful
while [ "$LOGIN_SUCCESS" = "false" ]
do
docker login ${REGISTRY} && LOGIN_SUCCESS="true"
done
fi
}
# decode username/password for a registry to query the API
decode_auth() {
AUTH_CREDS="$(cat ~/.dockercfg | jq -r '."'${1}'".auth' | base64 --decode)"
}
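# (Sketch of the legacy ~/.dockercfg layout this relies on; values are
# illustrative: {"registry.example.com": {"auth": "dXNlcjpwYXNz", ...}},
# where "dXNlcjpwYXNz" base64-decodes to "user:pass".)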
# query the v1 registry for a list of all images
query_v1_images() {
echo -e "\n${INFO} Getting a list of images from ${V1_REGISTRY}"
# check to see if a filter pattern was provided
if [ -z "${V1_REPO_FILTER}" ]
then
# no filter pattern was defined, get all repos
REPO_LIST="$(curl ${INSECURE_CURL} -s ${V1_REGISTRY}/v1/search?q= | jq -r '.results | .[] | .name')"
else
    # filter pattern defined, use grep to match repos w/regex capabilities
echo "curl ${INSECURE_CURL} -s ${V1_REGISTRY}/v1/search?q= | jq -r '.results | .[] | .name' | grep ${V1_REPO_FILTER}"
REPO_LIST=`curl ${INSECURE_CURL} -s ${V1_REGISTRY}/v1/search?q= | jq -r '.results | .[] | .name' | grep ${V1_REPO_FILTER}`
fi
echo REPO_LIST $REPO_LIST
# loop through all repos in v1 registry to get tags for each
for i in ${REPO_LIST}
do
# get list of tags for image i
IMAGE_TAGS=$(curl ${INSECURE_CURL} -s ${V1_REGISTRY}/v1/repositories/${i}/tags | jq -r 'keys | .[]')
# loop through tags to create list of full image names w/tags
for j in ${IMAGE_TAGS}
do
# check if an image is a 'library' image without a namespace
i=`echo $i |sed -e "s/^library\///g"`
# add image to list
FULL_IMAGE_LIST="${FULL_IMAGE_LIST} ${i}:${j}"
done
done
echo -e "${OK} Successfully retrieved list of Docker images from ${V1_REGISTRY}"
echo FULL_IMAGE_LIST $FULL_IMAGE_LIST
}
# show list of images from the v1 registry
show_v1_image_list() {
echo -e "\n${INFO} Full list of images from ${V1_REGISTRY} to be migrated:"
for i in ${FULL_IMAGE_LIST}
do
echo ${V1_REGISTRY}/${i}
done
echo -e "${OK} End full list of images from ${V1_REGISTRY}"
# check to see if user should be prompted
if ${USER_PROMPT}
then
    # prompt user to press any key to begin migration
    echo -en "\n${NOTICE} "
    read -rsp $'Press any key to begin the migration\n' -n1 key
fi
}
# push/pull image
push_pull_image() {
# get action and image name passed
ACTION="${1}"
IMAGE="${2}"
# check the action and act accordingly
case ${ACTION} in
push)
# push image
echo -e "${INFO} Pushing ${IMAGE}"
(docker push ${IMAGE} && echo -e "${OK} Successfully ${ACTION}ed ${IMAGE}\n") || catch_push_pull_error "push" "${IMAGE}"
;;
pull)
# pull image
echo -e "${INFO} Pulling ${IMAGE}"
(docker pull ${IMAGE} && echo -e "${OK} Successfully ${ACTION}ed ${IMAGE}\n") || catch_push_pull_error "pull" "${IMAGE}"
;;
esac
}
# retag image
retag_image() {
# get source and destination image names passed
SOURCE_IMAGE="${1}"
DESTINATION_IMAGE="${2}"
# retag image
(docker tag -f ${SOURCE_IMAGE} ${DESTINATION_IMAGE} && echo -e "${OK} ${V1_REGISTRY}/${i} > ${V2_REGISTRY}/${i}") || catch_retag_error "${SOURCE_IMAGE}" "${DESTINATION_IMAGE}"
}
# pull all images to local system
pull_images_from_v1() {
echo -e "\n${INFO} Pulling all images from ${V1_REGISTRY} to your local system"
for i in ${FULL_IMAGE_LIST}
do
push_pull_image "pull" "${V1_REGISTRY}/${i}"
done
echo -e "${OK} Successully pulled all images from ${V1_REGISTRY} to your local system"
}
# check to see if v1 and v2 registry share the same DNS name
check_registry_swap_or_retag() {
if [ "${V1_REGISTRY}" = "${V2_REGISTRY}" ]
then
# retagging not needed; re-using same DNS name for v2 registry
echo -e "${OK} Skipping re-tagging; same URL used for v1 and v2\n"
    # notify user to switch out their registry now
echo -en "${NOTICE} "
read -rsp $'Make the necessary changes to switch your v1 and v2 registries and then press any key to continue\n' key
else
# re-tag images; different DNS name used for v2 registry
echo -e "\n${INFO} Retagging all images from '${V1_REGISTRY}' to '${V2_REGISTRY}'"
for i in ${FULL_IMAGE_LIST}
do
retag_image "${V1_REGISTRY}/${i}" "${V2_REGISTRY}/${i}"
done
echo -e "${OK} Successfully retagged all images"
fi
}
# verify V2_REGISTRY is reporting as a v2 registry
verify_v2_ready() {
V2_READY="false"
while [ "${V2_READY}" = "false" ]
do
    # check to see if V2_REGISTRY is returning the proper api version string
    # (sketch, not the original check: a v2 registry answers /v2/ with a
    # Docker-Distribution-API-Version header of "registry/2.0", even on a 401)
    if curl ${INSECURE_CURL} -sI "${V2_REGISTRY}/v2/" | grep -qi 'Docker-Distribution-API-Version: registry/2.0'
    then
      # api version indicates v2; sets value to exit loop
      V2_READY="true"
    else
      sleep 5
    fi
done
# v2 registry verified as available
echo -e "\n${OK} Verified v2 registry (${V2_REGISTRY}) is available"
}
# push images to v2 registry
push_images_to_v2() {
echo -e "\n${INFO} Pushing all images to ${V2_REGISTRY}"
for i in ${FULL_IMAGE_LIST}
do
push_pull_image "push" "${V2_REGISTRY}/${i}"
done
echo -e "${OK} Successfully pushed all images to ${V2_REGISTRY}"
}
# cleanup images from local docker engine
cleanup_local_engine() {
echo -e "\n${INFO} Cleaning up images from local Docker engine"
# see if re-tagged images exist and remove accordingly
if [ "${V1_REGISTRY}" = "${V2_REGISTRY}" ]
then
for i in ${FULL_IMAGE_LIST}
do
# remove docker image/tags; allow failures here (in case image is actually in use)
docker rmi ${V1_REGISTRY}/${i} || true
done
else
for i in ${FULL_IMAGE_LIST}
do
# remove docker image/tags; allow failures here (in case image is actually in use)
docker rmi ${V1_REGISTRY}/${i} || true
docker rmi ${V2_REGISTRY}/${i} || true
done
fi
echo -e "${OK} Successfully cleaned up images from local Docker engine"
}
# migration complete
migration_complete() {
echo -e "\n${OK} Migration from v1 to v2 complete!"
}
# main function
main() {
initialize_migrator
verify_ready
if [ "${NO_LOGIN}" != "true" ]; then
docker_login ${V1_REGISTRY} ${V1_USERNAME} ${V1_PASSWORD} ${V1_EMAIL}
fi
decode_auth ${V1_REGISTRY}
query_v1_images
show_v1_image_list
pull_images_from_v1
check_registry_swap_or_retag
verify_v2_ready
if [ "${NO_LOGIN}" != "true" ]; then
docker_login ${V2_REGISTRY} ${V2_USERNAME} ${V2_PASSWORD} ${V2_EMAIL}
fi
push_images_to_v2
cleanup_local_engine
migration_complete
}
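# illustrative invocation (variable names follow the script's own conventions;
# actual hostnames and ports depend on your environment):
#   V1_REGISTRY=registry.example.com:5000 V2_REGISTRY=registry2.example.com:5000 ./migrator.sh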
main "$@"
|
wangyumi/migrator
|
migrator.sh
|
Shell
|
apache-2.0
| 12,197 |
javac test/tests/test9/*.java
rm ./classes/test9/*.class
mv test/tests/test9/*.class ./classes/test9/
java -ea -cp $INTRUDER_CLASSPATH:./classes intruder.wrapper.Main --user-specified-test test9.Test -intruder-output-dir "./output/test9/" > tmp.txt
sh ./scripts/cleanup.sh
echo "-------------------------------------------------------"
echo "Output of analysis placed in $AMEN_HOME/output/test9/"
echo "-------------------------------------------------------"
|
Groostav/CMPT880-term-project
|
intruder/scripts/test9.sh
|
Shell
|
apache-2.0
| 462 |
GPU_ID=1
EVERY=1000
MODEL=LstmMemoryModel
MODEL_DIR="../model/lstmmemory1024_moe8_noise"
start=$1
DIR="$(pwd)"
for checkpoint in $(cd $MODEL_DIR && python ${DIR}/training_utils/select.py $EVERY); do
echo $checkpoint;
if [ $checkpoint -gt $start ]; then
echo $checkpoint;
CUDA_VISIBLE_DEVICES=$GPU_ID python eval.py \
--train_dir="$MODEL_DIR" \
--model_checkpoint_path="${MODEL_DIR}/model.ckpt-${checkpoint}" \
--eval_data_pattern="/Youtube-8M/data/frame/validate/validatea*" \
--frame_features=True \
--feature_names="rgb,audio" \
--feature_sizes="1024,128" \
--model=$MODEL \
--moe_num_mixtures=8 \
--lstm_cells=1024 \
--lstm_layers=2 \
--batch_size=64 \
--rnn_swap_memory=True \
--run_once=True
fi
done
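# example: skip checkpoints up to 20000 and evaluate the rest (value illustrative):
#   bash eval-lstmmem-noise.sh 20000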
|
wangheda/youtube-8m
|
youtube-8m-wangheda/eval_scripts/eval-lstmmem-noise.sh
|
Shell
|
apache-2.0
| 769 |
function hmm() {
cat <<EOF
Invoke ". build/envsetup.sh" from your shell to add the following functions to your environment:
- lunch: lunch <product_name>-<build_variant>
- tapas: tapas [<App1> <App2> ...] [arm|x86|mips|armv5|arm64|x86_64|mips64] [eng|userdebug|user]
- croot: Changes directory to the top of the tree.
- cout: Changes directory to out.
- m: Makes from the top of the tree.
- mm: Builds all of the modules in the current directory, but not their dependencies.
- mmm: Builds all of the modules in the supplied directories, but not their dependencies.
To limit the modules being built use the syntax: mmm dir/:target1,target2.
- mma: Builds all of the modules in the current directory, and their dependencies.
- mmp: Builds all of the modules in the current directory and pushes them to the device.
- mmmp: Builds all of the modules in the supplied directories and pushes them to the device.
- mmma: Builds all of the modules in the supplied directories, and their dependencies.
- cgrep: Greps on all local C/C++ files.
- ggrep: Greps on all local Gradle files.
- jgrep: Greps on all local Java files.
- resgrep: Greps on all local res/*.xml files.
- mangrep: Greps on all local AndroidManifest.xml files.
- sepgrep: Greps on all local sepolicy files.
- sgrep: Greps on all local source files.
- godir: Go to the directory containing a file.
- cmremote: Add git remote for CM Gerrit Review
- cmgerrit: A Git wrapper that fetches/pushes patch from/to CM Gerrit Review
- cmrebase: Rebase a Gerrit change and push it again
- aospremote: Add git remote for matching AOSP repository
- cafremote: Add git remote for matching CodeAurora repository.
- mka: Builds using SCHED_BATCH on all processors
- mkap: Builds the module(s) using mka and pushes them to the device.
- cmka: Cleans and builds using mka.
- repolastsync: Prints date and time of last repo sync.
- reposync: Parallel repo sync using ionice and SCHED_BATCH
- repopick: Utility to fetch changes from Gerrit.
- installboot: Installs a boot.img to the connected device.
- installrecovery: Installs a recovery.img to the connected device.
Environment options:
- SANITIZE_HOST: Set to 'true' to use ASAN for all host modules. Note that
ASAN_OPTIONS=detect_leaks=0 will be set by default until the
build is leak-check clean.
Look at the source to view more functions. The complete list is:
EOF
T=$(gettop)
for i in `cat $T/build/envsetup.sh | sed -n "/^[ \t]*function /s/function \([a-z_]*\).*/\1/p" | sort | uniq`; do
echo "$i"
done | column
}
# Get the value of a build variable as an absolute path.
function get_abs_build_var()
{
T=$(gettop)
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP." >&2
return
fi
(\cd $T; CALLED_FROM_SETUP=true BUILD_SYSTEM=build/core \
command make --no-print-directory -f build/core/config.mk dumpvar-abs-$1)
}
# Get the exact value of a build variable.
function get_build_var()
{
T=$(gettop)
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP." >&2
return
fi
(\cd $T; CALLED_FROM_SETUP=true BUILD_SYSTEM=build/core \
command make --no-print-directory -f build/core/config.mk dumpvar-$1)
}
# check to see if the supplied product is one we can build
function check_product()
{
T=$(gettop)
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP." >&2
return
fi
if (echo -n $1 | grep -q -e "^ant_") ; then
ANT_BUILD=$(echo -n $1 | sed -e 's/^ant_//g')
export BUILD_NUMBER=$((date +%s%N ; echo $ANT_BUILD; hostname) | openssl sha1 | sed -e 's/.*=//g; s/ //g' | cut -c1-10)
else
ANT_BUILD=
fi
export ANT_BUILD
TARGET_PRODUCT=$1 \
TARGET_BUILD_VARIANT= \
TARGET_BUILD_TYPE= \
TARGET_BUILD_APPS= \
get_build_var TARGET_DEVICE > /dev/null
# hide successful answers, but allow the errors to show
}
VARIANT_CHOICES=(user userdebug eng)
# check to see if the supplied variant is valid
function check_variant()
{
for v in ${VARIANT_CHOICES[@]}
do
if [ "$v" = "$1" ]
then
return 0
fi
done
return 1
}
function setpaths()
{
T=$(gettop)
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP."
return
fi
##################################################################
# #
# Read me before you modify this code #
# #
# This function sets ANDROID_BUILD_PATHS to what it is adding #
# to PATH, and the next time it is run, it removes that from #
# PATH. This is required so lunch can be run more than once #
# and still have working paths. #
# #
##################################################################
# Note: on windows/cygwin, ANDROID_BUILD_PATHS will contain spaces
# due to "C:\Program Files" being in the path.
# out with the old
if [ -n "$ANDROID_BUILD_PATHS" ] ; then
export PATH=${PATH/$ANDROID_BUILD_PATHS/}
fi
if [ -n "$ANDROID_PRE_BUILD_PATHS" ] ; then
export PATH=${PATH/$ANDROID_PRE_BUILD_PATHS/}
# strip leading ':', if any
export PATH=${PATH/:%/}
fi
# and in with the new
prebuiltdir=$(getprebuilt)
gccprebuiltdir=$(get_abs_build_var ANDROID_GCC_PREBUILTS)
# defined in core/config.mk
targetgccversion=$(get_build_var TARGET_GCC_VERSION)
targetgccversion2=$(get_build_var 2ND_TARGET_GCC_VERSION)
targetlegacygccversion=$(get_build_var TARGET_LEGACY_GCC_VERSION)
export TARGET_GCC_VERSION=$targetgccversion
# The gcc toolchain does not exist for windows/cygwin. In this case, do not reference it.
export ANDROID_TOOLCHAIN=
export ANDROID_TOOLCHAIN_2ND_ARCH=
local ARCH=$(get_build_var TARGET_ARCH)
case $ARCH in
x86) toolchaindir=x86/x86_64-linux-android-$targetgccversion/bin
;;
x86_64) toolchaindir=x86/x86_64-linux-android-$targetgccversion/bin
;;
arm) toolchaindir=arm/arm-linux-androideabi-$targetgccversion/bin
;;
arm64) toolchaindir=aarch64/aarch64-linux-android-$targetgccversion/bin;
toolchaindir2=arm/arm-linux-androideabi-$targetgccversion2/bin
;;
mips|mips64) toolchaindir=mips/mips64el-linux-android-$targetgccversion/bin
;;
*)
echo "Can't find toolchain for unknown architecture: $ARCH"
toolchaindir=xxxxxxxxx
;;
esac
if [ -d "$gccprebuiltdir/$toolchaindir" ]; then
export ANDROID_TOOLCHAIN=$gccprebuiltdir/$toolchaindir
fi
if [ -d "$gccprebuiltdir/$toolchaindir2" ]; then
export ANDROID_TOOLCHAIN_2ND_ARCH=$gccprebuiltdir/$toolchaindir2
fi
unset ANDROID_KERNEL_TOOLCHAIN_PATH
case $ARCH in
arm)
# Legacy toolchain configuration used for ARM kernel compilation
toolchaindir=arm/arm-eabi-$targetlegacygccversion/bin
if [ -d "$gccprebuiltdir/$toolchaindir" ]; then
export ARM_EABI_TOOLCHAIN="$gccprebuiltdir/$toolchaindir"
ANDROID_KERNEL_TOOLCHAIN_PATH="$ARM_EABI_TOOLCHAIN":
fi
;;
*)
# No need to set ARM_EABI_TOOLCHAIN for other ARCHs
;;
esac
export ANDROID_DEV_SCRIPTS=$T/development/scripts:$T/prebuilts/devtools/tools:$T/external/selinux/prebuilts/bin
export ANDROID_BUILD_PATHS=$(get_build_var ANDROID_BUILD_PATHS):$ANDROID_TOOLCHAIN:$ANDROID_TOOLCHAIN_2ND_ARCH:$ANDROID_KERNEL_TOOLCHAIN_PATH$ANDROID_DEV_SCRIPTS:
# If prebuilts/android-emulator/<system>/ exists, prepend it to our PATH
# to ensure that the corresponding 'emulator' binaries are used.
case $(uname -s) in
Darwin)
ANDROID_EMULATOR_PREBUILTS=$T/prebuilts/android-emulator/darwin-x86_64
;;
Linux)
ANDROID_EMULATOR_PREBUILTS=$T/prebuilts/android-emulator/linux-x86_64
;;
*)
ANDROID_EMULATOR_PREBUILTS=
;;
esac
if [ -n "$ANDROID_EMULATOR_PREBUILTS" -a -d "$ANDROID_EMULATOR_PREBUILTS" ]; then
ANDROID_BUILD_PATHS=$ANDROID_BUILD_PATHS$ANDROID_EMULATOR_PREBUILTS:
export ANDROID_EMULATOR_PREBUILTS
fi
export PATH=$ANDROID_BUILD_PATHS$PATH
unset ANDROID_JAVA_TOOLCHAIN
unset ANDROID_PRE_BUILD_PATHS
if [ -n "$JAVA_HOME" ]; then
export ANDROID_JAVA_TOOLCHAIN=$JAVA_HOME/bin
export ANDROID_PRE_BUILD_PATHS=$ANDROID_JAVA_TOOLCHAIN:
export PATH=$ANDROID_PRE_BUILD_PATHS$PATH
fi
unset ANDROID_PRODUCT_OUT
export ANDROID_PRODUCT_OUT=$(get_abs_build_var PRODUCT_OUT)
export OUT=$ANDROID_PRODUCT_OUT
unset ANDROID_HOST_OUT
export ANDROID_HOST_OUT=$(get_abs_build_var HOST_OUT)
if [ -n "$ANDROID_CCACHE_DIR" ]; then
export CCACHE_DIR=$ANDROID_CCACHE_DIR
fi
# needed for building linux on MacOS
# TODO: fix the path
#export HOST_EXTRACFLAGS="-I "$T/system/kernel_headers/host_include
}
function printconfig()
{
T=$(gettop)
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP." >&2
return
fi
get_build_var report_config
}
function set_stuff_for_environment()
{
settitle
set_java_home
setpaths
set_sequence_number
# With this environment variable new GCC can apply colors to warnings/errors
export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
export ASAN_OPTIONS=detect_leaks=0
}
function set_sequence_number()
{
export BUILD_ENV_SEQUENCE_NUMBER=10
}
function settitle()
{
if [ "$STAY_OFF_MY_LAWN" = "" ]; then
local arch=$(gettargetarch)
local product=$TARGET_PRODUCT
local variant=$TARGET_BUILD_VARIANT
local apps=$TARGET_BUILD_APPS
if [ -z "$PROMPT_COMMAND" ]; then
# No prompts
PROMPT_COMMAND="echo -ne \"\033]0;${USER}@${HOSTNAME}: ${PWD}\007\""
elif [ -z "$(echo $PROMPT_COMMAND | grep '033]0;')" ]; then
# Prompts exist, but no hardstatus
PROMPT_COMMAND="echo -ne \"\033]0;${USER}@${HOSTNAME}: ${PWD}\007\";${PROMPT_COMMAND}"
fi
if [ ! -z "$ANDROID_PROMPT_PREFIX" ]; then
PROMPT_COMMAND="$(echo $PROMPT_COMMAND | sed -e 's/$ANDROID_PROMPT_PREFIX //g')"
fi
if [ -z "$apps" ]; then
ANDROID_PROMPT_PREFIX="[${arch}-${product}-${variant}]"
else
ANDROID_PROMPT_PREFIX="[$arch $apps $variant]"
fi
export ANDROID_PROMPT_PREFIX
# Inject build data into hardstatus
export PROMPT_COMMAND="$(echo $PROMPT_COMMAND | sed -e 's/\\033]0;\(.*\)\\007/\\033]0;$ANDROID_PROMPT_PREFIX \1\\007/g')"
fi
}
function check_bash_version()
{
# Keep us from trying to run in something that isn't bash.
if [ -z "${BASH_VERSION}" ]; then
return 1
fi
# Keep us from trying to run in bash that's too old.
if [ "${BASH_VERSINFO[0]}" -lt 4 ] ; then
return 2
fi
return 0
}
function choosetype()
{
echo "Build type choices are:"
echo " 1. release"
echo " 2. debug"
echo
local DEFAULT_NUM DEFAULT_VALUE
DEFAULT_NUM=1
DEFAULT_VALUE=release
export TARGET_BUILD_TYPE=
local ANSWER
while [ -z $TARGET_BUILD_TYPE ]
do
echo -n "Which would you like? ["$DEFAULT_NUM"] "
if [ -z "$1" ] ; then
read ANSWER
else
echo $1
ANSWER=$1
fi
case $ANSWER in
"")
export TARGET_BUILD_TYPE=$DEFAULT_VALUE
;;
1)
export TARGET_BUILD_TYPE=release
;;
release)
export TARGET_BUILD_TYPE=release
;;
2)
export TARGET_BUILD_TYPE=debug
;;
debug)
export TARGET_BUILD_TYPE=debug
;;
*)
echo
echo "I didn't understand your response. Please try again."
echo
;;
esac
if [ -n "$1" ] ; then
break
fi
done
set_stuff_for_environment
}
#
# This function isn't really right: It chooses a TARGET_PRODUCT
# based on the list of boards. Usually, that gets you something
# that kinda works with a generic product, but really, you should
# pick a product by name.
#
function chooseproduct()
{
if [ "x$TARGET_PRODUCT" != x ] ; then
default_value=$TARGET_PRODUCT
else
default_value=full
fi
export TARGET_PRODUCT=
local ANSWER
while [ -z "$TARGET_PRODUCT" ]
do
echo -n "Which product would you like? [$default_value] "
if [ -z "$1" ] ; then
read ANSWER
else
echo $1
ANSWER=$1
fi
if [ -z "$ANSWER" ] ; then
export TARGET_PRODUCT=$default_value
else
if check_product $ANSWER
then
export TARGET_PRODUCT=$ANSWER
else
echo "** Not a valid product: $ANSWER"
fi
fi
if [ -n "$1" ] ; then
break
fi
done
set_stuff_for_environment
}
function choosevariant()
{
echo "Variant choices are:"
local index=1
local v
for v in ${VARIANT_CHOICES[@]}
do
# The product name is the name of the directory containing
# the makefile we found, above.
echo " $index. $v"
index=$(($index+1))
done
local default_value=eng
local ANSWER
export TARGET_BUILD_VARIANT=
while [ -z "$TARGET_BUILD_VARIANT" ]
do
echo -n "Which would you like? [$default_value] "
if [ -z "$1" ] ; then
read ANSWER
else
echo $1
ANSWER=$1
fi
if [ -z "$ANSWER" ] ; then
export TARGET_BUILD_VARIANT=$default_value
elif (echo -n $ANSWER | grep -q -e "^[0-9][0-9]*$") ; then
if [ "$ANSWER" -le "${#VARIANT_CHOICES[@]}" ] ; then
export TARGET_BUILD_VARIANT=${VARIANT_CHOICES[$(($ANSWER-1))]}
fi
else
if check_variant $ANSWER
then
export TARGET_BUILD_VARIANT=$ANSWER
else
echo "** Not a valid variant: $ANSWER"
fi
fi
if [ -n "$1" ] ; then
break
fi
done
}
function choosecombo()
{
choosetype $1
echo
echo
chooseproduct $2
echo
echo
choosevariant $3
echo
set_stuff_for_environment
printconfig
}
# Clear this variable. It will be built up again when the vendorsetup.sh
# files are included at the end of this file.
unset LUNCH_MENU_CHOICES
function add_lunch_combo()
{
local new_combo=$1
local c
for c in ${LUNCH_MENU_CHOICES[@]} ; do
if [ "$new_combo" = "$c" ] ; then
return
fi
done
LUNCH_MENU_CHOICES=(${LUNCH_MENU_CHOICES[@]} $new_combo)
}
# add the default one here
add_lunch_combo aosp_arm-eng
add_lunch_combo aosp_arm64-eng
add_lunch_combo aosp_mips-eng
add_lunch_combo aosp_mips64-eng
add_lunch_combo aosp_x86-eng
add_lunch_combo aosp_x86_64-eng
function print_lunch_menu()
{
local uname=$(uname)
echo
echo "You're building on" $uname
if [ "$(uname)" = "Darwin" ] ; then
echo " (ohai, koush!)"
fi
echo
if [ "z${ANT_DEVICES_ONLY}" != "z" ]; then
echo "Breakfast menu... pick a combo:"
else
echo "Lunch menu... pick a combo:"
fi
local i=1
local choice
for choice in ${LUNCH_MENU_CHOICES[@]}
do
echo " $i. $choice "
i=$(($i+1))
done | column
if [ "z${ANT_DEVICES_ONLY}" != "z" ]; then
echo "... and don't forget the ant!"
fi
echo
}
function brunch()
{
breakfast $*
if [ $? -eq 0 ]; then
mka ant
else
echo "No such item in brunch menu. Try 'breakfast'"
return 1
fi
return $?
}
function breakfast()
{
target=$1
local variant=$2
ANT_DEVICES_ONLY="true"
unset LUNCH_MENU_CHOICES
add_lunch_combo full-eng
for f in `/bin/ls vendor/antos/vendorsetup.sh 2> /dev/null`
do
echo "including $f"
. $f
done
unset f
if [ $# -eq 0 ]; then
# No arguments, so let's have the full menu
lunch
else
echo "z$target" | grep -q "-"
if [ $? -eq 0 ]; then
# A buildtype was specified, assume a full device name
lunch $target
else
# This is probably just the CM model name
if [ -z "$variant" ]; then
variant="userdebug"
fi
lunch ant_$target-$variant
fi
fi
return $?
}
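# example: "breakfast shamu" expands to "lunch ant_shamu-userdebug", while
# "breakfast ant_shamu-eng" is passed to lunch unchanged (device name illustrative)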
alias bib=breakfast
function lunch()
{
local answer
LUNCH_MENU_CHOICES=($(for l in ${LUNCH_MENU_CHOICES[@]}; do echo "$l"; done | sort))
if [ "$1" ] ; then
answer=$1
else
print_lunch_menu
echo -n "Which would you like? [aosp_arm-eng] "
read answer
fi
local selection=
if [ -z "$answer" ]
then
selection=aosp_arm-eng
elif (echo -n $answer | grep -q -e "^[0-9][0-9]*$")
then
if [ $answer -le ${#LUNCH_MENU_CHOICES[@]} ]
then
selection=${LUNCH_MENU_CHOICES[$(($answer-1))]}
fi
elif (echo -n $answer | grep -q -e "^[^\-][^\-]*-[^\-][^\-]*$")
then
selection=$answer
fi
if [ -z "$selection" ]
then
echo
echo "Invalid lunch combo: $answer"
return 1
fi
export TARGET_BUILD_APPS=
local product=$(echo -n $selection | sed -e "s/-.*$//")
check_product $product
if [ $? -ne 0 ]
then
# if we can't find a product, try to grab it off the CM github
T=$(gettop)
pushd $T > /dev/null
build/tools/roomservice.py $product
popd > /dev/null
check_product $product
else
build/tools/roomservice.py $product true
fi
if [ $? -ne 0 ]
then
echo
echo "** Don't have a product spec for: '$product'"
echo "** Do you have the right repo manifest?"
product=
fi
local variant=$(echo -n $selection | sed -e "s/^[^\-]*-//")
check_variant $variant
if [ $? -ne 0 ]
then
echo
echo "** Invalid variant: '$variant'"
echo "** Must be one of ${VARIANT_CHOICES[@]}"
variant=
fi
if [ -z "$product" -o -z "$variant" ]
then
echo
return 1
fi
export TARGET_PRODUCT=$product
export TARGET_BUILD_VARIANT=$variant
export TARGET_BUILD_TYPE=release
echo
fixup_common_out_dir
set_stuff_for_environment
printconfig
}
# Tab completion for lunch.
function _lunch()
{
local cur prev opts
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
COMPREPLY=( $(compgen -W "${LUNCH_MENU_CHOICES[*]}" -- ${cur}) )
return 0
}
complete -F _lunch lunch 2>/dev/null
# Configures the build to build unbundled apps.
# Run tapas with one or more app names (from LOCAL_PACKAGE_NAME)
function tapas()
{
local arch="$(echo $* | xargs -n 1 echo | \grep -E '^(arm|x86|mips|armv5|arm64|x86_64|mips64)$' | xargs)"
local variant="$(echo $* | xargs -n 1 echo | \grep -E '^(user|userdebug|eng)$' | xargs)"
local density="$(echo $* | xargs -n 1 echo | \grep -E '^(ldpi|mdpi|tvdpi|hdpi|xhdpi|xxhdpi|xxxhdpi|alldpi)$' | xargs)"
local apps="$(echo $* | xargs -n 1 echo | \grep -E -v '^(user|userdebug|eng|arm|x86|mips|armv5|arm64|x86_64|mips64|ldpi|mdpi|tvdpi|hdpi|xhdpi|xxhdpi|xxxhdpi|alldpi)$' | xargs)"
if [ $(echo $arch | wc -w) -gt 1 ]; then
echo "tapas: Error: Multiple build archs supplied: $arch"
return
fi
if [ $(echo $variant | wc -w) -gt 1 ]; then
echo "tapas: Error: Multiple build variants supplied: $variant"
return
fi
if [ $(echo $density | wc -w) -gt 1 ]; then
echo "tapas: Error: Multiple densities supplied: $density"
return
fi
local product=full
case $arch in
x86) product=full_x86;;
mips) product=full_mips;;
armv5) product=generic_armv5;;
arm64) product=aosp_arm64;;
x86_64) product=aosp_x86_64;;
mips64) product=aosp_mips64;;
esac
if [ -z "$variant" ]; then
variant=eng
fi
if [ -z "$apps" ]; then
apps=all
fi
if [ -z "$density" ]; then
density=alldpi
fi
export TARGET_PRODUCT=$product
export TARGET_BUILD_VARIANT=$variant
export TARGET_BUILD_DENSITY=$density
export TARGET_BUILD_TYPE=release
export TARGET_BUILD_APPS=$apps
set_stuff_for_environment
printconfig
}
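# example: "tapas arm64 userdebug Settings" configures an unbundled build of the
# Settings app for an aosp_arm64 userdebug configuration (app name illustrative)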
function eat()
{
if [ "$OUT" ] ; then
MODVERSION=$(get_build_var ANT_VERSION)
ZIPFILE=ant-$MODVERSION.zip
ZIPPATH=$OUT/$ZIPFILE
if [ ! -f $ZIPPATH ] ; then
echo "Nothing to eat"
return 1
fi
adb start-server # Prevent unexpected starting server message from adb get-state in the next line
if [ $(adb get-state) != device -a $(adb shell test -e /sbin/recovery 2> /dev/null; echo $?) != 0 ] ; then
echo "No device is online. Waiting for one..."
echo "Please connect USB and/or enable USB debugging"
until [ $(adb get-state) = device -o $(adb shell test -e /sbin/recovery 2> /dev/null; echo $?) = 0 ];do
sleep 1
done
echo "Device Found.."
fi
if (adb shell getprop ro.ant.device | grep -q "$ANT_BUILD");
then
# if adbd isn't root we can't write to /cache/recovery/
adb root
sleep 1
adb wait-for-device
cat << EOF > /tmp/command
--sideload_auto_reboot
EOF
if adb push /tmp/command /cache/recovery/ ; then
echo "Rebooting into recovery for sideload installation"
adb reboot recovery
adb wait-for-sideload
adb sideload $ZIPPATH
fi
rm /tmp/command
else
echo "Nothing to eat"
return 1
fi
return $?
else
echo "The connected device does not appear to be $ANT_BUILD, run away!"
fi
}
function omnom
{
brunch $*
eat
}
function gettop
{
local TOPFILE=build/core/envsetup.mk
if [ -n "$TOP" -a -f "$TOP/$TOPFILE" ] ; then
# The following circumlocution ensures we remove symlinks from TOP.
(cd $TOP; PWD= /bin/pwd)
else
if [ -f $TOPFILE ] ; then
# The following circumlocution (repeated below as well) ensures
# that we record the true directory name and not one that is
# faked up with symlink names.
PWD= /bin/pwd
else
local HERE=$PWD
T=
while [ \( ! \( -f $TOPFILE \) \) -a \( $PWD != "/" \) ]; do
\cd ..
T=`PWD= /bin/pwd -P`
done
\cd $HERE
if [ -f "$T/$TOPFILE" ]; then
echo $T
fi
fi
fi
}
# Return driver for "make", if any (eg. static analyzer)
function getdriver()
{
local T="$1"
test "$WITH_STATIC_ANALYZER" = "0" && unset WITH_STATIC_ANALYZER
if [ -n "$WITH_STATIC_ANALYZER" ]; then
echo "\
$T/prebuilts/misc/linux-x86/analyzer/tools/scan-build/scan-build \
--use-analyzer $T/prebuilts/misc/linux-x86/analyzer/bin/analyzer \
--status-bugs \
--top=$T"
fi
}
function m()
{
local T=$(gettop)
local DRV=$(getdriver $T)
if [ "$T" ]; then
$DRV make -C $T -f build/core/main.mk $@
else
echo "Couldn't locate the top of the tree. Try setting TOP."
return 1
fi
}
function findmakefile()
{
TOPFILE=build/core/envsetup.mk
local HERE=$PWD
T=
while [ \( ! \( -f $TOPFILE \) \) -a \( $PWD != "/" \) ]; do
T=`PWD= /bin/pwd`
if [ -f "$T/Android.mk" ]; then
echo $T/Android.mk
\cd $HERE
return
fi
\cd ..
done
\cd $HERE
}
function mm()
{
local T=$(gettop)
local DRV=$(getdriver $T)
# If we're sitting in the root of the build tree, just do a
# normal make.
if [ -f build/core/envsetup.mk -a -f Makefile ]; then
$DRV make $@
else
# Find the closest Android.mk file.
local M=$(findmakefile)
local MODULES=
local GET_INSTALL_PATH=
local ARGS=
# Remove the path to top as the makefilepath needs to be relative
local M=`echo $M|sed 's:'$T'/::'`
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP."
return 1
elif [ ! "$M" ]; then
echo "Couldn't locate a makefile from the current directory."
return 1
else
for ARG in $@; do
case $ARG in
GET-INSTALL-PATH) GET_INSTALL_PATH=$ARG;;
esac
done
if [ -n "$GET_INSTALL_PATH" ]; then
MODULES=
ARGS=GET-INSTALL-PATH
else
MODULES=all_modules
ARGS=$@
fi
ONE_SHOT_MAKEFILE=$M $DRV make -C $T -f build/core/main.mk $MODULES $ARGS
fi
fi
}
function mmm()
{
local T=$(gettop)
local DRV=$(getdriver $T)
if [ "$T" ]; then
local MAKEFILE=
local MODULES=
local ARGS=
local DIR TO_CHOP
local GET_INSTALL_PATH=
local DASH_ARGS=$(echo "$@" | awk -v RS=" " -v ORS=" " '/^-.*$/')
local DIRS=$(echo "$@" | awk -v RS=" " -v ORS=" " '/^[^-].*$/')
for DIR in $DIRS ; do
MODULES=`echo $DIR | sed -n -e 's/.*:\(.*$\)/\1/p' | sed 's/,/ /'`
if [ "$MODULES" = "" ]; then
MODULES=all_modules
fi
DIR=`echo $DIR | sed -e 's/:.*//' -e 's:/$::'`
if [ -f $DIR/Android.mk ]; then
local TO_CHOP=`(\cd -P -- $T && pwd -P) | wc -c | tr -d ' '`
local TO_CHOP=`expr $TO_CHOP + 1`
local START=`PWD= /bin/pwd`
local MFILE=`echo $START | cut -c${TO_CHOP}-`
if [ "$MFILE" = "" ] ; then
MFILE=$DIR/Android.mk
else
MFILE=$MFILE/$DIR/Android.mk
fi
MAKEFILE="$MAKEFILE $MFILE"
else
case $DIR in
showcommands | snod | dist | incrementaljavac | *=*) ARGS="$ARGS $DIR";;
GET-INSTALL-PATH) GET_INSTALL_PATH=$DIR;;
*) if [ -d $DIR ]; then
echo "No Android.mk in $DIR.";
else
echo "Couldn't locate the directory $DIR";
fi
return 1;;
esac
fi
done
if [ -n "$GET_INSTALL_PATH" ]; then
ARGS=$GET_INSTALL_PATH
MODULES=
fi
ONE_SHOT_MAKEFILE="$MAKEFILE" $DRV make -C $T -f build/core/main.mk $DASH_ARGS $MODULES $ARGS
else
echo "Couldn't locate the top of the tree. Try setting TOP."
return 1
fi
}
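# example: build selected modules from one directory without their dependencies
# (module names illustrative):
#   mmm frameworks/base:framework,services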
function mma()
{
local T=$(gettop)
local DRV=$(getdriver $T)
if [ -f build/core/envsetup.mk -a -f Makefile ]; then
$DRV make $@
else
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP."
return 1
fi
local MY_PWD=`PWD= /bin/pwd|sed 's:'$T'/::'`
$DRV make -C $T -f build/core/main.mk $@ all_modules BUILD_MODULES_IN_PATHS="$MY_PWD"
fi
}
function mmma()
{
local T=$(gettop)
local DRV=$(getdriver $T)
if [ "$T" ]; then
local DASH_ARGS=$(echo "$@" | awk -v RS=" " -v ORS=" " '/^-.*$/')
local DIRS=$(echo "$@" | awk -v RS=" " -v ORS=" " '/^[^-].*$/')
local MY_PWD=`PWD= /bin/pwd`
if [ "$MY_PWD" = "$T" ]; then
MY_PWD=
else
MY_PWD=`echo $MY_PWD|sed 's:'$T'/::'`
fi
local DIR=
local MODULE_PATHS=
local ARGS=
for DIR in $DIRS ; do
if [ -d $DIR ]; then
if [ "$MY_PWD" = "" ]; then
MODULE_PATHS="$MODULE_PATHS $DIR"
else
MODULE_PATHS="$MODULE_PATHS $MY_PWD/$DIR"
fi
else
case $DIR in
showcommands | snod | dist | incrementaljavac | *=*) ARGS="$ARGS $DIR";;
*) echo "Couldn't find directory $DIR"; return 1;;
esac
fi
done
$DRV make -C $T -f build/core/main.mk $DASH_ARGS $ARGS all_modules BUILD_MODULES_IN_PATHS="$MODULE_PATHS"
else
echo "Couldn't locate the top of the tree. Try setting TOP."
return 1
fi
}
function croot()
{
T=$(gettop)
if [ "$T" ]; then
\cd $(gettop)
else
echo "Couldn't locate the top of the tree. Try setting TOP."
fi
}
function cout()
{
if [ "$OUT" ]; then
cd $OUT
else
echo "Couldn't locate out directory. Try setting OUT."
fi
}
function cproj()
{
TOPFILE=build/core/envsetup.mk
local HERE=$PWD
T=
while [ \( ! \( -f $TOPFILE \) \) -a \( $PWD != "/" \) ]; do
T=$PWD
if [ -f "$T/Android.mk" ]; then
\cd $T
return
fi
\cd ..
done
\cd $HERE
echo "can't find Android.mk"
}
# simplified version of ps; output in the form
# <pid> <procname>
function qpid() {
local prepend=''
local append=''
if [ "$1" = "--exact" ]; then
prepend=' '
append='$'
shift
elif [ "$1" = "--help" -o "$1" = "-h" ]; then
echo "usage: qpid [[--exact] <process name|pid>"
return 255
fi
local EXE="$1"
if [ "$EXE" ] ; then
qpid | \grep "$prepend$EXE$append"
else
adb shell ps \
| tr -d '\r' \
| sed -e 1d -e 's/^[^ ]* *\([0-9]*\).* \([^ ]*\)$/\1 \2/'
fi
}
function pid()
{
local prepend=''
local append=''
if [ "$1" = "--exact" ]; then
prepend=' '
append='$'
shift
fi
local EXE="$1"
if [ "$EXE" ] ; then
local PID=`adb shell ps \
| tr -d '\r' \
| \grep "$prepend$EXE$append" \
| sed -e 's/^[^ ]* *\([0-9]*\).*$/\1/'`
echo "$PID"
else
echo "usage: pid [--exact] <process name>"
return 255
fi
}
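# example: send SIGKILL to mediaserver by name (illustrative; killing system
# processes typically requires adb root):
#   adb shell kill -9 $(pid --exact mediaserver)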
# coredump_setup - enable core dumps globally for any process
# that has the core-file-size limit set correctly
#
# NOTE: You must call also coredump_enable for a specific process
# if its core-file-size limit is not set already.
# NOTE: Core dumps are written to ramdisk; they will not survive a reboot!
function coredump_setup()
{
echo "Getting root...";
adb root;
adb wait-for-device;
echo "Remounting root parition read-write...";
adb shell mount -w -o remount -t rootfs rootfs;
sleep 1;
adb wait-for-device;
adb shell mkdir -p /cores;
adb shell mount -t tmpfs tmpfs /cores;
adb shell chmod 0777 /cores;
echo "Granting SELinux permission to dump in /cores...";
adb shell restorecon -R /cores;
echo "Set core pattern.";
adb shell 'echo /cores/core.%p > /proc/sys/kernel/core_pattern';
echo "Done."
}
# coredump_enable - enable core dumps for the specified process
# $1 = PID of process (e.g., $(pid mediaserver))
#
# NOTE: coredump_setup must have been called as well for a core
# dump to actually be generated.
function coredump_enable()
{
local PID=$1;
if [ -z "$PID" ]; then
printf "Expecting a PID!\n";
return;
fi;
echo "Setting core limit for $PID to infinite...";
adb shell prlimit $PID 4 -1 -1
}
# core - send SIGSEGV and pull the core for process
# $1 = PID of process (e.g., $(pid mediaserver))
#
# NOTE: coredump_setup must be called once per boot for core dumps to be
# enabled globally.
function core()
{
local PID=$1;
if [ -z "$PID" ]; then
printf "Expecting a PID!\n";
return;
fi;
local CORENAME=core.$PID;
local COREPATH=/cores/$CORENAME;
local SIG=SEGV;
coredump_enable $1;
local done=0;
while [ $(adb shell "[ -d /proc/$PID ] && echo -n yes") ]; do
printf "\tSending SIG%s to %d...\n" $SIG $PID;
adb shell kill -$SIG $PID;
sleep 1;
done;
adb shell "while [ ! -f $COREPATH ] ; do echo waiting for $COREPATH to be generated; sleep 1; done"
echo "Done: core is under $COREPATH on device.";
}
# systemstack - dump the current stack trace of all threads in the system process
# to the usual ANR traces file
function systemstack()
{
stacks system_server
}
function stacks()
{
if [[ $1 =~ ^[0-9]+$ ]] ; then
local PID="$1"
elif [ "$1" ] ; then
local PIDLIST="$(pid $1)"
if [[ $PIDLIST =~ ^[0-9]+$ ]] ; then
local PID="$PIDLIST"
elif [ "$PIDLIST" ] ; then
echo "more than one process: $1"
else
echo "no such process: $1"
fi
else
echo "usage: stacks [pid|process name]"
fi
if [ "$PID" ] ; then
# Determine whether the process is native
if adb shell ls -l /proc/$PID/exe | grep -q /system/bin/app_process ; then
# Dump stacks of Dalvik process
local TRACES=/data/anr/traces.txt
local ORIG=/data/anr/traces.orig
local TMP=/data/anr/traces.tmp
# Keep original traces to avoid clobbering
adb shell mv $TRACES $ORIG
# Make sure we have a usable file
adb shell touch $TRACES
adb shell chmod 666 $TRACES
# Dump stacks and wait for dump to finish
adb shell kill -3 $PID
adb shell notify $TRACES >/dev/null
# Restore original stacks, and show current output
adb shell mv $TRACES $TMP
adb shell mv $ORIG $TRACES
adb shell cat $TMP
else
# Dump stacks of native process
local USE64BIT="$(is64bit $PID)"
adb shell debuggerd$USE64BIT -b $PID
fi
fi
}
# Read the ELF header from /proc/$PID/exe to determine if the process is
# 64-bit.
function is64bit()
{
local PID="$1"
if [ "$PID" ] ; then
if [[ "$(adb shell cat /proc/$PID/exe | xxd -l 1 -s 4 -ps)" -eq "02" ]] ; then
echo "64"
else
echo ""
fi
else
echo ""
fi
}
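# Background: byte 4 of an ELF header is EI_CLASS, which is 01 for 32-bit
# (ELFCLASS32) and 02 for 64-bit (ELFCLASS64) binaries; that is the byte the
# "xxd -l 1 -s 4 -ps" call above extracts.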
function dddclient()
{
local OUT_ROOT=$(get_abs_build_var PRODUCT_OUT)
local OUT_SYMBOLS=$(get_abs_build_var TARGET_OUT_UNSTRIPPED)
local OUT_SO_SYMBOLS=$(get_abs_build_var TARGET_OUT_SHARED_LIBRARIES_UNSTRIPPED)
local OUT_VENDOR_SO_SYMBOLS=$(get_abs_build_var TARGET_OUT_VENDOR_SHARED_LIBRARIES_UNSTRIPPED)
local OUT_EXE_SYMBOLS=$(get_symbols_directory)
local PREBUILTS=$(get_abs_build_var ANDROID_PREBUILTS)
local ARCH=$(get_build_var TARGET_ARCH)
local GDB
case "$ARCH" in
arm) GDB=arm-linux-androideabi-gdb;;
arm64) GDB=arm-linux-androideabi-gdb; GDB64=aarch64-linux-android-gdb;;
mips|mips64) GDB=mips64el-linux-android-gdb;;
x86) GDB=x86_64-linux-android-gdb;;
x86_64) GDB=x86_64-linux-android-gdb;;
*) echo "Unknown arch $ARCH"; return 1;;
esac
if [ "$OUT_ROOT" -a "$PREBUILTS" ]; then
local EXE="$1"
if [ "$EXE" ] ; then
EXE=$1
if [[ $EXE =~ ^[^/].* ]] ; then
EXE="system/bin/"$EXE
fi
else
EXE="app_process"
fi
local PORT="$2"
if [ "$PORT" ] ; then
PORT=$2
else
PORT=":5039"
fi
local PID="$3"
if [ "$PID" ] ; then
if [[ ! "$PID" =~ ^[0-9]+$ ]] ; then
PID=`pid $3`
if [[ ! "$PID" =~ ^[0-9]+$ ]] ; then
# that likely didn't work because of returning multiple processes
# try again, filtering by root processes (don't contain colon)
PID=`adb shell ps | \grep $3 | \grep -v ":" | awk '{print $2}'`
if [[ ! "$PID" =~ ^[0-9]+$ ]]
then
echo "Couldn't resolve '$3' to single PID"
return 1
else
echo ""
echo "WARNING: multiple processes matching '$3' observed, using root process"
echo ""
fi
fi
fi
adb forward "tcp$PORT" "tcp$PORT"
local USE64BIT="$(is64bit $PID)"
adb shell gdbserver$USE64BIT $PORT --attach $PID &
sleep 2
else
echo ""
echo "If you haven't done so already, do this first on the device:"
echo " gdbserver $PORT /system/bin/$EXE"
echo " or"
echo " gdbserver $PORT --attach <PID>"
echo ""
fi
OUT_SO_SYMBOLS=$OUT_SO_SYMBOLS$USE64BIT
OUT_VENDOR_SO_SYMBOLS=$OUT_VENDOR_SO_SYMBOLS$USE64BIT
echo >|"$OUT_ROOT/gdbclient.cmds" "set solib-absolute-prefix $OUT_SYMBOLS"
echo >>"$OUT_ROOT/gdbclient.cmds" "set solib-search-path $OUT_SO_SYMBOLS:$OUT_SO_SYMBOLS/hw:$OUT_SO_SYMBOLS/ssl/engines:$OUT_SO_SYMBOLS/drm:$OUT_SO_SYMBOLS/egl:$OUT_SO_SYMBOLS/soundfx:$OUT_VENDOR_SO_SYMBOLS:$OUT_VENDOR_SO_SYMBOLS/hw:$OUT_VENDOR_SO_SYMBOLS/egl"
echo >>"$OUT_ROOT/gdbclient.cmds" "source $ANDROID_BUILD_TOP/development/scripts/gdb/dalvik.gdb"
echo >>"$OUT_ROOT/gdbclient.cmds" "target remote $PORT"
# Enable special debugging for ART processes.
if [[ $EXE =~ (^|/)(app_process|dalvikvm)(|32|64)$ ]]; then
echo >> "$OUT_ROOT/gdbclient.cmds" "art-on"
fi
echo >>"$OUT_ROOT/gdbclient.cmds" ""
local WHICH_GDB=
# 64-bit exe found
if [ "$USE64BIT" != "" ] ; then
WHICH_GDB=$ANDROID_TOOLCHAIN/$GDB64
# 32-bit exe / 32-bit platform
elif [ "$(get_build_var TARGET_2ND_ARCH)" = "" ]; then
WHICH_GDB=$ANDROID_TOOLCHAIN/$GDB
# 32-bit exe / 64-bit platform
else
WHICH_GDB=$ANDROID_TOOLCHAIN_2ND_ARCH/$GDB
fi
ddd --debugger $WHICH_GDB -x "$OUT_ROOT/gdbclient.cmds" "$OUT_EXE_SYMBOLS/$EXE"
else
echo "Unable to determine build system output dir."
fi
}
case `uname -s` in
Darwin)
function sgrep()
{
find -E . -name .repo -prune -o -name .git -prune -o -type f -iregex '.*\.(c|h|cc|cpp|S|java|xml|sh|mk|aidl)' -print0 | xargs -0 grep --color -n "$@"
}
;;
*)
function sgrep()
{
find . -name .repo -prune -o -name .git -prune -o -type f -iregex '.*\.\(c\|h\|cc\|cpp\|S\|java\|xml\|sh\|mk\|aidl\)' -print0 | xargs -0 grep --color -n "$@"
}
;;
esac
function gettargetarch
{
get_build_var TARGET_ARCH
}
function ggrep()
{
find . -name .repo -prune -o -name .git -prune -o -name out -prune -o -type f -name "*\.gradle" -print0 | xargs -0 grep --color -n "$@"
}
function jgrep()
{
find . -name .repo -prune -o -name .git -prune -o -name out -prune -o -type f -name "*\.java" -print0 | xargs -0 grep --color -n "$@"
}
function cgrep()
{
find . -name .repo -prune -o -name .git -prune -o -name out -prune -o -type f \( -name '*.c' -o -name '*.cc' -o -name '*.cpp' -o -name '*.h' -o -name '*.hpp' \) -print0 | xargs -0 grep --color -n "$@"
}
function resgrep()
{
for dir in `find . -name .repo -prune -o -name .git -prune -o -name out -prune -o -name res -type d`; do find $dir -type f -name '*\.xml' -print0 | xargs -0 grep --color -n "$@"; done;
}
function mangrep()
{
find . -name .repo -prune -o -name .git -prune -o -path ./out -prune -o -type f -name 'AndroidManifest.xml' -print0 | xargs -0 grep --color -n "$@"
}
function sepgrep()
{
find . -name .repo -prune -o -name .git -prune -o -path ./out -prune -o -name sepolicy -type d -print0 | xargs -0 grep --color -n -r --exclude-dir=\.git "$@"
}
function rcgrep()
{
find . -name .repo -prune -o -name .git -prune -o -name out -prune -o -type f -name "*\.rc*" -print0 | xargs -0 grep --color -n "$@"
}
case `uname -s` in
Darwin)
function mgrep()
{
find -E . -name .repo -prune -o -name .git -prune -o -path ./out -prune -o -type f -iregex '.*/(Makefile|Makefile\..*|.*\.make|.*\.mak|.*\.mk)' -print0 | xargs -0 grep --color -n "$@"
}
function treegrep()
{
find -E . -name .repo -prune -o -name .git -prune -o -type f -iregex '.*\.(c|h|cpp|S|java|xml)' -print0 | xargs -0 grep --color -n -i "$@"
}
;;
*)
function mgrep()
{
find . -name .repo -prune -o -name .git -prune -o -path ./out -prune -o -regextype posix-egrep -iregex '(.*\/Makefile|.*\/Makefile\..*|.*\.make|.*\.mak|.*\.mk)' -type f -print0 | xargs -0 grep --color -n "$@"
}
function treegrep()
{
find . -name .repo -prune -o -name .git -prune -o -regextype posix-egrep -iregex '.*\.(c|h|cpp|S|java|xml)' -type f -print0 | xargs -0 grep --color -n -i "$@"
}
;;
esac
function getprebuilt
{
get_abs_build_var ANDROID_PREBUILTS
}
function tracedmdump()
{
T=$(gettop)
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP."
return
fi
local prebuiltdir=$(getprebuilt)
local arch=$(gettargetarch)
local KERNEL=$T/prebuilts/qemu-kernel/$arch/vmlinux-qemu
local TRACE=$1
if [ ! "$TRACE" ] ; then
echo "usage: tracedmdump tracename"
return
fi
if [ ! -r "$KERNEL" ] ; then
echo "Error: cannot find kernel: '$KERNEL'"
return
fi
local BASETRACE=$(basename $TRACE)
if [ "$BASETRACE" = "$TRACE" ] ; then
TRACE=$ANDROID_PRODUCT_OUT/traces/$TRACE
fi
echo "post-processing traces..."
rm -f $TRACE/qtrace.dexlist
post_trace $TRACE
if [ $? -ne 0 ]; then
echo "***"
echo "*** Error: malformed trace. Did you remember to exit the emulator?"
echo "***"
return
fi
echo "generating dexlist output..."
/bin/ls $ANDROID_PRODUCT_OUT/system/framework/*.jar $ANDROID_PRODUCT_OUT/system/app/*.apk $ANDROID_PRODUCT_OUT/data/app/*.apk 2>/dev/null | xargs dexlist > $TRACE/qtrace.dexlist
echo "generating dmtrace data..."
q2dm -r $ANDROID_PRODUCT_OUT/symbols $TRACE $KERNEL $TRACE/dmtrace || return
echo "generating html file..."
dmtracedump -h $TRACE/dmtrace >| $TRACE/dmtrace.html || return
echo "done, see $TRACE/dmtrace.html for details"
echo "or run:"
echo " traceview $TRACE/dmtrace"
}
# communicate with a running device or emulator, set up necessary state,
# and run the hat command.
function runhat()
{
# process standard adb options
local adbTarget=""
if [ "$1" = "-d" -o "$1" = "-e" ]; then
adbTarget=$1
shift 1
elif [ "$1" = "-s" ]; then
adbTarget="$1 $2"
shift 2
fi
local adbOptions=${adbTarget}
#echo adbOptions = ${adbOptions}
# runhat options
local targetPid=$1
if [ "$targetPid" = "" ]; then
echo "Usage: runhat [ -d | -e | -s serial ] target-pid"
return
fi
# confirm hat is available
if [ -z $(which hat) ]; then
echo "hat is not available in this configuration."
return
fi
# issue "am" command to cause the hprof dump
local devFile=/data/local/tmp/hprof-$targetPid
echo "Poking $targetPid and waiting for data..."
echo "Storing data at $devFile"
adb ${adbOptions} shell am dumpheap $targetPid $devFile
echo "Press enter when logcat shows \"hprof: heap dump completed\""
echo -n "> "
read
local localFile=/tmp/$$-hprof
echo "Retrieving file $devFile..."
adb ${adbOptions} pull $devFile $localFile
adb ${adbOptions} shell rm $devFile
echo "Running hat on $localFile"
echo "View the output by pointing your browser at http://localhost:7000/"
echo ""
hat -JXmx512m $localFile
}
function getbugreports()
{
local reports=(`adb shell ls /sdcard/bugreports | tr -d '\r'`)
if [ ! "$reports" ]; then
echo "Could not locate any bugreports."
return
fi
local report
for report in ${reports[@]}
do
echo "/sdcard/bugreports/${report}"
adb pull /sdcard/bugreports/${report} ${report}
gunzip ${report}
done
}
function getsdcardpath()
{
adb ${adbOptions} shell echo -n \$\{EXTERNAL_STORAGE\}
}
function getscreenshotpath()
{
echo "$(getsdcardpath)/Pictures/Screenshots"
}
function getlastscreenshot()
{
local screenshot_path=$(getscreenshotpath)
local screenshot=`adb ${adbOptions} ls ${screenshot_path} | grep Screenshot_[0-9-]*.*\.png | sort -rk 3 | cut -d " " -f 4 | head -n 1`
if [ "$screenshot" = "" ]; then
echo "No screenshots found."
return
fi
echo "${screenshot}"
adb ${adbOptions} pull ${screenshot_path}/${screenshot}
}
function startviewserver()
{
local port=4939
if [ $# -gt 0 ]; then
port=$1
fi
adb shell service call window 1 i32 $port
}
function stopviewserver()
{
adb shell service call window 2
}
function isviewserverstarted()
{
adb shell service call window 3
}
function key_home()
{
adb shell input keyevent 3
}
function key_back()
{
adb shell input keyevent 4
}
function key_menu()
{
adb shell input keyevent 82
}
function smoketest()
{
if [ ! "$ANDROID_PRODUCT_OUT" ]; then
echo "Couldn't locate output files. Try running 'lunch' first." >&2
return
fi
T=$(gettop)
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP." >&2
return
fi
(\cd "$T" && mmm tests/SmokeTest) &&
adb uninstall com.android.smoketest > /dev/null &&
adb uninstall com.android.smoketest.tests > /dev/null &&
adb install $ANDROID_PRODUCT_OUT/data/app/SmokeTestApp.apk &&
adb install $ANDROID_PRODUCT_OUT/data/app/SmokeTest.apk &&
adb shell am instrument -w com.android.smoketest.tests/android.test.InstrumentationTestRunner
}
# simple shortcut to the runtest command
function runtest()
{
T=$(gettop)
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP." >&2
return
fi
("$T"/development/testrunner/runtest.py $@)
}
function godir () {
if [[ -z "$1" ]]; then
echo "Usage: godir <regex>"
return
fi
T=$(gettop)
if [ ! "$OUT_DIR" = "" ]; then
mkdir -p $OUT_DIR
FILELIST=$OUT_DIR/filelist
else
FILELIST=$T/filelist
fi
if [[ ! -f $FILELIST ]]; then
echo -n "Creating index..."
(\cd $T; find . -wholename ./out -prune -o -wholename ./.repo -prune -o -type f > $FILELIST)
echo " Done"
echo ""
fi
local lines
lines=($(\grep "$1" $FILELIST | sed -e 's/\/[^/]*$//' | sort | uniq))
if [[ ${#lines[@]} = 0 ]]; then
echo "Not found"
return
fi
local pathname
local choice
if [[ ${#lines[@]} > 1 ]]; then
while [[ -z "$pathname" ]]; do
local index=1
local line
for line in ${lines[@]}; do
printf "%6s %s\n" "[$index]" $line
index=$(($index + 1))
done
echo
echo -n "Select one: "
unset choice
read choice
if [[ $choice -gt ${#lines[@]} || $choice -lt 1 ]]; then
echo "Invalid choice"
continue
fi
pathname=${lines[$(($choice-1))]}
done
else
pathname=${lines[0]}
fi
\cd $T/$pathname
}
function cmremote()
{
git remote rm cmremote 2> /dev/null
GERRIT_REMOTE=$(git config --get remote.github.projectname)
if [ -z "$GERRIT_REMOTE" ]
then
echo "Unable to set up the git remote; are you inside a git repo?"
return 0
fi
CMUSER=$(git config --get review.review.cyanogenmod.org.username)
if [ -z "$CMUSER" ]
then
git remote add cmremote ssh://review.cyanogenmod.org:29418/$GERRIT_REMOTE
else
git remote add cmremote ssh://[email protected]:29418/$GERRIT_REMOTE
fi
echo You can now push to "cmremote".
}
function aospremote()
{
git remote rm aosp 2> /dev/null
if [ ! -d .git ]
then
echo .git directory not found. Please run this from the root directory of the Android repository you wish to set up.
fi
PROJECT=`pwd -P | sed s#$ANDROID_BUILD_TOP/##g`
if (echo $PROJECT | grep -qv "^device")
then
PFX="platform/"
fi
git remote add aosp https://android.googlesource.com/$PFX$PROJECT
echo "Remote 'aosp' created"
}
function cafremote()
{
git remote rm caf 2> /dev/null
if [ ! -d .git ]
then
echo .git directory not found. Please run this from the root directory of the Android repository you wish to set up.
fi
PROJECT=`pwd -P | sed s#$ANDROID_BUILD_TOP/##g`
if (echo $PROJECT | grep -qv "^device")
then
PFX="platform/"
fi
git remote add caf git://codeaurora.org/$PFX$PROJECT
echo "Remote 'caf' created"
}
function installboot()
{
if [ ! -e "$OUT/recovery/root/etc/recovery.fstab" ];
then
echo "No recovery.fstab found. Build recovery first."
return 1
fi
if [ ! -e "$OUT/boot.img" ];
then
echo "No boot.img found. Run make bootimage first."
return 1
fi
PARTITION=`grep "^\/boot" $OUT/recovery/root/etc/recovery.fstab | awk {'print $3'}`
if [ -z "$PARTITION" ];
then
# Try for RECOVERY_FSTAB_VERSION = 2
PARTITION=`grep "[[:space:]]\/boot[[:space:]]" $OUT/recovery/root/etc/recovery.fstab | awk {'print $1'}`
PARTITION_TYPE=`grep "[[:space:]]\/boot[[:space:]]" $OUT/recovery/root/etc/recovery.fstab | awk {'print $3'}`
if [ -z "$PARTITION" ];
then
echo "Unable to determine boot partition."
return 1
fi
fi
adb start-server
adb wait-for-online
adb root
sleep 1
adb wait-for-online shell mount /system 2>&1 > /dev/null
adb wait-for-online remount
if (adb shell getprop ro.ant.device | grep -q "$ANT_BUILD");
then
adb push $OUT/boot.img /cache/
for i in $OUT/system/lib/modules/*;
do
adb push $i /system/lib/modules/
done
adb shell dd if=/cache/boot.img of=$PARTITION
adb shell chmod 644 /system/lib/modules/*
echo "Installation complete."
else
echo "The connected device does not appear to be $ANT_BUILD, run away!"
fi
}
function installrecovery()
{
if [ ! -e "$OUT/recovery/root/etc/recovery.fstab" ];
then
echo "No recovery.fstab found. Build recovery first."
return 1
fi
if [ ! -e "$OUT/recovery.img" ];
then
echo "No recovery.img found. Run make recoveryimage first."
return 1
fi
PARTITION=`grep "^\/recovery" $OUT/recovery/root/etc/recovery.fstab | awk {'print $3'}`
if [ -z "$PARTITION" ];
then
# Try for RECOVERY_FSTAB_VERSION = 2
PARTITION=`grep "[[:space:]]\/recovery[[:space:]]" $OUT/recovery/root/etc/recovery.fstab | awk {'print $1'}`
PARTITION_TYPE=`grep "[[:space:]]\/recovery[[:space:]]" $OUT/recovery/root/etc/recovery.fstab | awk {'print $3'}`
if [ -z "$PARTITION" ];
then
echo "Unable to determine recovery partition."
return 1
fi
fi
adb start-server
adb wait-for-online
adb root
sleep 1
adb wait-for-online shell mount /system 2>&1 >> /dev/null
adb wait-for-online remount
if (adb shell getprop ro.ant.device | grep -q "$ANT_BUILD");
then
adb push $OUT/recovery.img /cache/
adb shell dd if=/cache/recovery.img of=$PARTITION
echo "Installation complete."
else
echo "The connected device does not appear to be $ANT_BUILD, run away!"
fi
}
function makerecipe() {
if [ -z "$1" ]
then
echo "No branch name provided."
return 1
fi
cd android
sed -i s/'default revision=.*'/'default revision="refs\/heads\/'$1'"'/ default.xml
git commit -a -m "$1"
cd ..
repo forall -c '
if [ "$REPO_REMOTE" == "github" ]
then
pwd
cmremote
git push cmremote HEAD:refs/heads/'$1'
fi
'
}
function cmgerrit() {
if [ $# -eq 0 ]; then
$FUNCNAME help
return 1
fi
local user=`git config --get review.review.cyanogenmod.org.username`
local review=`git config --get remote.github.review`
local project=`git config --get remote.github.projectname`
local command=$1
shift
case $command in
help)
if [ $# -eq 0 ]; then
cat <<EOF
Usage:
$FUNCNAME COMMAND [OPTIONS] [CHANGE-ID[/PATCH-SET]][{@|^|~|:}ARG] [-- ARGS]
Commands:
fetch Just fetch the change as FETCH_HEAD
help Show this help, or for a specific command
pull Pull a change into current branch
push Push HEAD or a local branch to Gerrit for a specific branch
Any other Git commands that support refname would work as:
git fetch URL CHANGE && git COMMAND OPTIONS FETCH_HEAD{@|^|~|:}ARG -- ARGS
See '$FUNCNAME help COMMAND' for more information on a specific command.
Example:
$FUNCNAME checkout -b topic 1234/5
works as:
git fetch http://DOMAIN/p/PROJECT refs/changes/34/1234/5 \\
&& git checkout -b topic FETCH_HEAD
will check out a new branch 'topic' based on patch-set 5 of change 1234.
Patch-set 1 will be fetched if omitted.
EOF
return
fi
case $1 in
__cmg_*) echo "For internal use only." ;;
changes|for)
if [ "$FUNCNAME" = "cmgerrit" ]; then
echo "'$FUNCNAME $1' is deprecated."
fi
;;
help) $FUNCNAME help ;;
fetch|pull) cat <<EOF
usage: $FUNCNAME $1 [OPTIONS] CHANGE-ID[/PATCH-SET]
works as:
git $1 OPTIONS http://DOMAIN/p/PROJECT \\
refs/changes/HASH/CHANGE-ID/{PATCH-SET|1}
Example:
$FUNCNAME $1 1234
will $1 patch-set 1 of change 1234
EOF
;;
push) cat <<EOF
usage: $FUNCNAME push [OPTIONS] [LOCAL_BRANCH:]REMOTE_BRANCH
works as:
git push OPTIONS ssh://USER@DOMAIN:29418/PROJECT \\
{LOCAL_BRANCH|HEAD}:refs/for/REMOTE_BRANCH
Example:
$FUNCNAME push fix6789:gingerbread
will push local branch 'fix6789' to Gerrit for branch 'gingerbread'.
HEAD will be pushed from local if omitted.
EOF
;;
*)
$FUNCNAME __cmg_err_not_supported $1 && return
cat <<EOF
usage: $FUNCNAME $1 [OPTIONS] CHANGE-ID[/PATCH-SET][{@|^|~|:}ARG] [-- ARGS]
works as:
git fetch http://DOMAIN/p/PROJECT \\
refs/changes/HASH/CHANGE-ID/{PATCH-SET|1} \\
&& git $1 OPTIONS FETCH_HEAD{@|^|~|:}ARG -- ARGS
EOF
;;
esac
;;
__cmg_get_ref)
$FUNCNAME __cmg_err_no_arg $command $# && return 1
local change_id patchset_id hash
case $1 in
*/*)
change_id=${1%%/*}
patchset_id=${1#*/}
;;
*)
change_id=$1
patchset_id=1
;;
esac
hash=$(($change_id % 100))
case $hash in
[0-9]) hash="0$hash" ;;
esac
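# Gerrit shards change refs by the last two digits of the change number,
# zero-padded; e.g. change 8126, patch-set 2 -> refs/changes/26/8126/2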
echo "refs/changes/$hash/$change_id/$patchset_id"
;;
fetch|pull)
$FUNCNAME __cmg_err_no_arg $command $# help && return 1
$FUNCNAME __cmg_err_not_repo && return 1
local change=$1
shift
git $command $@ http://$review/p/$project \
$($FUNCNAME __cmg_get_ref $change) || return 1
;;
push)
$FUNCNAME __cmg_err_no_arg $command $# help && return 1
$FUNCNAME __cmg_err_not_repo && return 1
if [ -z "$user" ]; then
echo >&2 "Gerrit username not found."
return 1
fi
local local_branch remote_branch
case $1 in
*:*)
local_branch=${1%:*}
remote_branch=${1##*:}
;;
*)
local_branch=HEAD
remote_branch=$1
;;
esac
shift
git push $@ ssh://$user@$review:29418/$project \
$local_branch:refs/for/$remote_branch || return 1
;;
changes|for)
if [ "$FUNCNAME" = "cmgerrit" ]; then
echo >&2 "'$FUNCNAME $command' is deprecated."
fi
;;
__cmg_err_no_arg)
if [ $# -lt 2 ]; then
echo >&2 "'$FUNCNAME $command' missing argument."
elif [ $2 -eq 0 ]; then
if [ -n "$3" ]; then
$FUNCNAME help $1
else
echo >&2 "'$FUNCNAME $1' missing argument."
fi
else
return 1
fi
;;
__cmg_err_not_repo)
if [ -z "$review" -o -z "$project" ]; then
echo >&2 "Not currently in any reviewable repository."
else
return 1
fi
;;
__cmg_err_not_supported)
$FUNCNAME __cmg_err_no_arg $command $# && return
case $1 in
#TODO: filter more git commands that don't use refname
init|add|rm|mv|status|clone|remote|bisect|config|stash)
echo >&2 "'$FUNCNAME $1' is not supported."
;;
*) return 1 ;;
esac
;;
#TODO: other special cases?
*)
$FUNCNAME __cmg_err_not_supported $command && return 1
$FUNCNAME __cmg_err_no_arg $command $# help && return 1
$FUNCNAME __cmg_err_not_repo && return 1
local args="$@"
local change pre_args refs_arg post_args
case "$args" in
*--\ *)
pre_args=${args%%-- *}
post_args="-- ${args#*-- }"
;;
*) pre_args="$args" ;;
esac
args=($pre_args)
pre_args=
if [ ${#args[@]} -gt 0 ]; then
change=${args[${#args[@]}-1]}
fi
if [ ${#args[@]} -gt 1 ]; then
pre_args=${args[0]}
for ((i=1; i<${#args[@]}-1; i++)); do
pre_args="$pre_args ${args[$i]}"
done
fi
while ((1)); do
case $change in
""|--)
$FUNCNAME help $command
return 1
;;
*@*)
if [ -z "$refs_arg" ]; then
refs_arg="@${change#*@}"
change=${change%%@*}
fi
;;
*~*)
if [ -z "$refs_arg" ]; then
refs_arg="~${change#*~}"
change=${change%%~*}
fi
;;
*^*)
if [ -z "$refs_arg" ]; then
refs_arg="^${change#*^}"
change=${change%%^*}
fi
;;
*:*)
if [ -z "$refs_arg" ]; then
refs_arg=":${change#*:}"
change=${change%%:*}
fi
;;
*) break ;;
esac
done
$FUNCNAME fetch $change \
&& git $command $pre_args FETCH_HEAD$refs_arg $post_args \
|| return 1
;;
esac
}
function cmrebase() {
local repo=$1
local refs=$2
local pwd="$(pwd)"
local dir="$(gettop)/$repo"
if [ -z $repo ] || [ -z $refs ]; then
echo "CyanogenMod Gerrit Rebase Usage: "
echo " cmrebase <path to project> <patch IDs on Gerrit>"
echo " The patch IDs appear on the Gerrit commands that are offered."
echo " They consist on a series of numbers and slashes, after the text"
echo " refs/changes. For example, the ID in the following command is 26/8126/2"
echo ""
echo " git[...]ges_apps_Camera refs/changes/26/8126/2 && git cherry-pick FETCH_HEAD"
echo ""
return
fi
if [ ! -d $dir ]; then
echo "Directory $dir doesn't exist in tree."
return
fi
cd $dir
repo=$(cat .git/config | grep git://github.com | awk '{ print $NF }' | sed s#git://github.com/##g)
echo "Starting branch..."
repo start tmprebase .
echo "Bringing it up to date..."
repo sync .
echo "Fetching change..."
git fetch "http://review.cyanogenmod.org/p/$repo" "refs/changes/$refs" && git cherry-pick FETCH_HEAD
if [ "$?" != "0" ]; then
echo "Error cherry-picking. Not uploading!"
return
fi
echo "Uploading..."
repo upload .
echo "Cleaning up..."
repo abandon tmprebase .
cd $pwd
}
function mka() {
local T=$(gettop)
if [ "$T" ]; then
case `uname -s` in
Darwin)
make -C $T -j `sysctl hw.ncpu|cut -d" " -f2` "$@"
;;
*)
mk_timer schedtool -B -n 1 -e ionice -n 1 make -C $T -j$(cat /proc/cpuinfo | grep "^processor" | wc -l) "$@"
;;
esac
else
echo "Couldn't locate the top of the tree. Try setting TOP."
fi
}
function cmka() {
if [ ! -z "$1" ]; then
for i in "$@"; do
case $i in
bacon|otapackage|systemimage)
mka installclean
mka $i
;;
*)
mka clean-$i
mka $i
;;
esac
done
else
mka clean
mka
fi
}
function repolastsync() {
RLSPATH="$ANDROID_BUILD_TOP/.repo/.repo_fetchtimes.json"
RLSLOCAL=$(date -d "$(stat -c %z $RLSPATH)" +"%e %b %Y, %T %Z")
RLSUTC=$(date -d "$(stat -c %z $RLSPATH)" -u +"%e %b %Y, %T %Z")
echo "Last repo sync: $RLSLOCAL / $RLSUTC"
}
function reposync() {
case `uname -s` in
Darwin)
repo sync -j 4 "$@"
;;
*)
schedtool -B -n 1 -e ionice -n 1 `which repo` sync -j 4 "$@"
;;
esac
}
function repodiff() {
if [ -z "$*" ]; then
echo "Usage: repodiff <ref-from> [[ref-to] [--numstat]]"
return
fi
diffopts=$* repo forall -c \
'echo "$REPO_PATH ($REPO_REMOTE)"; git diff ${diffopts} 2>/dev/null ;'
}
# Credit for color strip sed: http://goo.gl/BoIcm
function dopush()
{
local func=$1
shift
adb start-server # Prevent unexpected starting server message from adb get-state in the next line
if [ $(adb get-state) != device -a $(adb shell test -e /sbin/recovery 2> /dev/null; echo $?) != 0 ] ; then
echo "No device is online. Waiting for one..."
echo "Please connect USB and/or enable USB debugging"
until [ $(adb get-state) = device -o $(adb shell test -e /sbin/recovery 2> /dev/null; echo $?) = 0 ];do
sleep 1
done
echo "Device Found."
fi
if (adb shell getprop ro.ant.device | grep -q "$ANT_BUILD") || [ "$FORCE_PUSH" == "true" ];
then
# retrieve IP and PORT info if we're using a TCP connection
TCPIPPORT=$(adb devices | egrep '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+:[0-9]+[^0-9]+' \
| head -1 | awk '{print $1}')
adb root &> /dev/null
sleep 0.3
if [ -n "$TCPIPPORT" ]
then
# adb root just killed our connection
# so reconnect...
adb connect "$TCPIPPORT"
fi
adb wait-for-device &> /dev/null
sleep 0.3
adb remount &> /dev/null
mkdir -p $OUT
($func $*|tee $OUT/.log;return ${PIPESTATUS[0]})
ret=$?;
if [ $ret -ne 0 ]; then
rm -f $OUT/.log;return $ret
fi
# Install: <file>
LOC="$(cat $OUT/.log | sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | grep '^Install: ' | cut -d ':' -f 2)"
# Copy: <file>
LOC="$LOC $(cat $OUT/.log | sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | grep '^Copy: ' | cut -d ':' -f 2)"
# If any files are going to /data, push an octal file permissions reader to device
if [ -n "$(echo $LOC | egrep '(^|\s)/data')" ]; then
CHKPERM="/data/local/tmp/chkfileperm.sh"
(
cat <<'EOF'
#!/system/xbin/sh
FILE=$@
if [ -e $FILE ]; then
ls -l $FILE | awk '{k=0;for(i=0;i<=8;i++)k+=((substr($1,i+2,1)~/[rwx]/)*2^(8-i));if(k)printf("%0o ",k);print}' | cut -d ' ' -f1
fi
EOF
) > $OUT/.chkfileperm.sh
echo "Pushing file permissions checker to device"
adb push $OUT/.chkfileperm.sh $CHKPERM
adb shell chmod 755 $CHKPERM
rm -f $OUT/.chkfileperm.sh
fi
stop_n_start=false
for FILE in $(echo $LOC | tr " " "\n"); do
# Make sure file is in $OUT/system or $OUT/data
case $FILE in
$OUT/system/*|$OUT/data/*)
# Get target file name (i.e. /system/bin/adb)
TARGET=$(echo $FILE | sed "s#$OUT##")
;;
*) continue ;;
esac
case $TARGET in
/data/*)
# fs_config only sets permissions and se labels for files pushed to /system
if [ -n "$CHKPERM" ]; then
OLDPERM=$(adb shell $CHKPERM $TARGET)
OLDPERM=$(echo $OLDPERM | tr -d '\r' | tr -d '\n')
OLDOWN=$(adb shell ls -al $TARGET | awk '{print $2}')
OLDGRP=$(adb shell ls -al $TARGET | awk '{print $3}')
fi
echo "Pushing: $TARGET"
adb push $FILE $TARGET
if [ -n "$OLDPERM" ]; then
echo "Setting file permissions: $OLDPERM, $OLDOWN":"$OLDGRP"
adb shell chown "$OLDOWN":"$OLDGRP" $TARGET
adb shell chmod "$OLDPERM" $TARGET
else
echo "$TARGET did not exist previously, you should set file permissions manually"
fi
adb shell restorecon "$TARGET"
;;
/system/priv-app/SystemUI/SystemUI.apk|/system/framework/*)
# Only need to stop services once
if ! $stop_n_start; then
adb shell stop
stop_n_start=true
fi
echo "Pushing: $TARGET"
adb push $FILE $TARGET
;;
*)
echo "Pushing: $TARGET"
adb push $FILE $TARGET
;;
esac
done
if [ -n "$CHKPERM" ]; then
adb shell rm $CHKPERM
fi
if $stop_n_start; then
adb shell start
fi
rm -f $OUT/.log
return 0
else
echo "The connected device does not appear to be $ANT_BUILD, run away!"
fi
}
alias mmp='dopush mm'
alias mmmp='dopush mmm'
alias mkap='dopush mka'
alias cmkap='dopush cmka'
function repopick() {
T=$(gettop)
$T/build/tools/repopick.py $@
}
function fixup_common_out_dir() {
common_out_dir=$(get_build_var OUT_DIR)/target/common
target_device=$(get_build_var TARGET_DEVICE)
if [ ! -z $ANT_FIXUP_COMMON_OUT ]; then
if [ -d ${common_out_dir} ] && [ ! -L ${common_out_dir} ]; then
mv ${common_out_dir} ${common_out_dir}-${target_device}
ln -s ${common_out_dir}-${target_device} ${common_out_dir}
else
[ -L ${common_out_dir} ] && rm ${common_out_dir}
mkdir -p ${common_out_dir}-${target_device}
ln -s ${common_out_dir}-${target_device} ${common_out_dir}
fi
else
[ -L ${common_out_dir} ] && rm ${common_out_dir}
mkdir -p ${common_out_dir}
fi
}
# Force JAVA_HOME to point to java 1.7 if it isn't already set.
#
# Note that the MacOS path for java 1.7 includes a minor revision number (sigh).
# For some reason, installing the JDK doesn't make it show up in the
# JavaVM.framework/Versions/1.7/ folder.
function set_java_home() {
# Clear the existing JAVA_HOME value if we set it ourselves, so that
# we can reset it later, depending on the version of java the build
# system needs.
#
# If we don't do this, the JAVA_HOME value set by the first call to
# build/envsetup.sh will persist forever.
if [ -n "$ANDROID_SET_JAVA_HOME" ]; then
export JAVA_HOME=""
fi
if [ ! "$JAVA_HOME" ]; then
case `uname -s` in
Darwin)
export JAVA_HOME=$(/usr/libexec/java_home -v 1.7)
;;
*)
export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
;;
esac
# Keep track of the fact that we set JAVA_HOME ourselves, so that
# we can change it on the next envsetup.sh, if required.
export ANDROID_SET_JAVA_HOME=true
fi
}
# Print colored exit condition
function pez {
"$@"
local retval=$?
if [ $retval -ne 0 ]
then
printf "\e[0;31mFAILURE\e[00m\n"
else
printf "\e[0;32mSUCCESS\e[00m\n"
fi
return $retval
}
function get_make_command()
{
echo command make
}
function mk_timer()
{
local start_time=$(date +"%s")
$@
local ret=$?
local end_time=$(date +"%s")
local tdiff=$(($end_time-$start_time))
local hours=$(($tdiff / 3600 ))
local mins=$((($tdiff % 3600) / 60))
local secs=$(($tdiff % 60))
local ncolors=$(tput colors 2>/dev/null)
if [ -n "$ncolors" ] && [ $ncolors -ge 8 ]; then
color_failed="\e[0;31m"
color_success="\e[0;32m"
color_reset="\e[0m"
else
color_failed=""
color_success=""
color_reset=""
fi
echo
if [ $ret -eq 0 ] ; then
printf "${color_success}#### make completed successfully "
else
printf "${color_failed}#### make failed to build some targets "
fi
if [ $hours -gt 0 ] ; then
printf "(%02g:%02g:%02g (hh:mm:ss))" $hours $mins $secs
elif [ $mins -gt 0 ] ; then
printf "(%02g:%02g (mm:ss))" $mins $secs
elif [ $secs -gt 0 ] ; then
printf "(%s seconds)" $secs
fi
printf " ####${color_reset}\n\n"
return $ret
}
function make()
{
mk_timer $(get_make_command) "$@"
}
if [ "x$SHELL" != "x/bin/bash" ]; then
case `ps -o command -p $$` in
*bash*)
;;
*zsh*)
;;
*)
echo "WARNING: Only bash and zsh are supported, use of other shell may lead to erroneous results"
;;
esac
fi
# Execute the contents of any vendorsetup.sh files we can find.
for f in `test -d device && find -L device -maxdepth 4 -name 'vendorsetup.sh' 2> /dev/null | sort` \
`test -d vendor && find -L vendor -maxdepth 4 -name 'vendorsetup.sh' 2> /dev/null | sort`
do
echo "including $f"
. $f
done
unset f
# Add completions
check_bash_version && {
dirs="sdk/bash_completion vendor/antos/bash_completion"
for dir in $dirs; do
if [ -d ${dir} ]; then
for f in `/bin/ls ${dir}/[a-z]*.bash 2> /dev/null`; do
echo "including $f"
. $f
done
fi
done
}
export ANDROID_BUILD_TOP=$(gettop)
|
Ant-OS/android_build
|
envsetup.sh
|
Shell
|
apache-2.0
| 72,293 |
#!/usr/bin/env bash
set -euo pipefail
# copies logs from the remote machine to a local temporary directory
REMOTE_HOST=$1
NAME=$2
REMOTE_PORT=${REMOTE_PORT:-22}
REMOTE_USER=${REMOTE_USER:-core}
SSH_OPTS=${SSH_OPTS:-}" -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
REMOTE_LOGS_DIR=${REMOTE_LOGS_DIR:-logs}
function usage() {
echo "USAGE:"
echo "$0: <remote-host> <name>"
exit 1
}
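# Example invocation (host, port, and name are hypothetical):
#   REMOTE_PORT=2222 ./copylogs.sh 10.0.0.5 master-1
# copies ~core/logs from 10.0.0.5 into ./logs-master-1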
[ "$#" == 2 ] || usage
scp -P ${REMOTE_PORT} ${SSH_OPTS} -r ${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_LOGS_DIR} .
mv ${REMOTE_LOGS_DIR} logs-${NAME}
echo DONE
|
kubernetes-incubator/bootkube
|
hack/quickstart/copylogs.sh
|
Shell
|
apache-2.0
| 568 |
#!/bin/bash
KHA_VERSION="0.0.5"
JVM_ARGS="-Dsun.security.smartcardio.library=/lib/x86_64-linux-gnu/libpcsclite.so.1"
JVM_ARGS="${JVM_ARGS} -Dorg.khannex.io.endianness=LITTLE_ENDIAN"
JVM_ARGS="${JVM_ARGS} -Dorg.khannex.logDir=/var/tmp/"
cd `readlink -f $0 | xargs dirname` && \
java $JVM_ARGS -jar kh-annex-${KHA_VERSION}.jar "$@"
|
lukacsd/kh-annex
|
src/main/resources/bin/kh-annex.sh
|
Shell
|
apache-2.0
| 332 |
#!/usr/bin/env bash
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# DO NOT MODIFY THIS FILE. Add tests to be executed in test_models.sh
# Usage: docker_test.sh [--docker-image <DOCKER_IMG_NAME>]
#
# DOCKER_IMG_NAME: (Optional) The tensorflow docker container version
#                  If this optional value is not supplied (via the
#                  --docker-image flag), the default tensorflow docker
#                  image (tensorflow/tensorflow:1.3.0) will be used.
#
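# Example invocations (the image tag is hypothetical):
#   ./testing/docker_test.sh
#   ./testing/docker_test.sh --docker-image tensorflow/tensorflow:1.4.0
#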
# SETUP
# Default exit status
EXIT=0
# Get current directory path to mount
export WORKSPACE=${PWD}
DOCKER_BINARY="docker"
# Decide docker image and tag
if [[ "$1" == "--docker-image" ]]; then
DOCKER_IMG_NAME="$2"
echo "Using specified docker tensorflow image and tag: ${DOCKER_IMG_NAME}"
shift 2
else
DOCKER_IMG_NAME="tensorflow/tensorflow:1.3.0"
echo "Using the default docker tensorflow image and tag: ${DOCKER_IMG_NAME}"
fi
# Specify which test is to be run
COMMAND="./testing/test_models.sh"
# RUN
${DOCKER_BINARY} run \
-v ${WORKSPACE}:/workspace \
-w /workspace \
-t \
${DOCKER_IMG_NAME} \
${COMMAND} \
|| EXIT=$?
# TEARDOWN
${DOCKER_BINARY} rmi \
-f \
${DOCKER_IMG_NAME}
git clean -dfx
# Return exit status
exit ${EXIT}
|
jmhsi/justin_tinker
|
data_science/courses/learning_dl_packages/models/official/testing/docker_test.sh
|
Shell
|
apache-2.0
| 1,872 |
#!/bin/bash
for i in "$@"
do
case $i in
-a=*|--app-id=*)
APP_ID="${i#*=}"
shift # past argument=value
;;
-u|--update)
UPDATE=YES
shift # past argument with no value
;;
*)
# unknown option
;;
esac
done
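# Example invocations (the app id is hypothetical):
#   ./docker-entrypoint.sh --app-id=abc123 --update   # update an existing API
#   ./docker-entrypoint.sh                            # create a new API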
echo "UDPATE = ${UPDATE}"
echo "APP ID = ${APP_ID}"
SEP="==================="
confd -onetime -backend env
echo "Wrote swagger config..."
echo $SEP
cat /data/swagger.yaml
echo $SEP
if [ "${UPDATE}" = "YES" ]; then
java -jar /aws-apigateway-importer.jar --update ${APP_ID} /data/swagger.yaml
else
java -jar /aws-apigateway-importer.jar --create /data/swagger.yaml
fi
|
Stockflare/aws-api-gateway
|
docker-entrypoint.sh
|
Shell
|
apache-2.0
| 658 |
#!/usr/bin/env bash
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TensorFlow uses 'bazel' for builds and tests.
# The TensorFlow Go API aims to be usable with the 'go' tool
# (using 'go get' etc.) and thus without bazel.
#
# This script acts as a bridge between bazel and go so that:
# bazel test :test
# succeeds iff
# go test github.com/tensorflow/tensorflow/tensorflow/go
# succeeds.
set -ex
# Find the 'go' tool
if [[ ! -x "go" && -z $(which go) ]]
then
if [[ -x "/usr/local/go/bin/go" ]]
then
export PATH="${PATH}:/usr/local/go/bin"
else
echo "Could not find the 'go' tool in PATH or /usr/local/go"
exit 1
fi
fi
# Setup a GOPATH that includes just the TensorFlow Go API.
export GOPATH="${TEST_TMPDIR}/go"
export GOCACHE="${TEST_TMPDIR}/cache"
mkdir -p "${GOPATH}/src/github.com/tensorflow"
ln -s "${PWD}" "${GOPATH}/src/github.com/tensorflow/tensorflow"
# Ensure that the TensorFlow C library is accessible to the
# linker at build and run time.
export LIBRARY_PATH="${PWD}/tensorflow"
OS=$(uname -s)
if [[ "${OS}" = "Linux" ]]
then
if [[ -z "${LD_LIBRARY_PATH}" ]]
then
export LD_LIBRARY_PATH="${PWD}/tensorflow"
else
export LD_LIBRARY_PATH="${PWD}/tensorflow:${LD_LIBRARY_PATH}"
fi
elif [[ "${OS}" = "Darwin" ]]
then
if [[ -z "${DYLD_LIBRARY_PATH}" ]]
then
export DYLD_LIBRARY_PATH="${PWD}/tensorflow"
else
export DYLD_LIBRARY_PATH="${PWD}/tensorflow:${DYLD_LIBRARY_PATH}"
fi
else
echo "Only support Linux/Darwin, System $OS is not supported"
exit 1
fi
# Document the Go version and run tests
echo "Go version: $(go version)"
go test \
github.com/tensorflow/tensorflow/tensorflow/go \
github.com/tensorflow/tensorflow/tensorflow/go/op
|
kevin-coder/tensorflow-fork
|
tensorflow/go/test.sh
|
Shell
|
apache-2.0
| 2,358 |
#!/bin/bash
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# This script assumes the standard setup on tensorflow Jenkins windows machines.
# It is NOT guaranteed to work on any other machine. Use at your own risk!
#
# REQUIREMENTS:
# * All installed in standard locations:
# - JDK8, and JAVA_HOME set.
# - Microsoft Visual Studio 2015 Community Edition
# - Msys2
# - Anaconda3
# * Bazel windows executable copied as "bazel.exe" and included in PATH.
# All commands shall pass, and all should be visible.
set -x
set -e
# This script is under <repo_root>/tensorflow/tools/ci_build/windows/gpu/pip/
# Change into repository root.
script_dir=$(dirname $0)
cd ${script_dir%%tensorflow/tools/ci_build/windows/gpu/pip}.
# Setting up the environment variables Bazel and ./configure needs
source "tensorflow/tools/ci_build/windows/bazel/common_env.sh" \
|| { echo "Failed to source common_env.sh" >&2; exit 1; }
# load bazel_test_lib.sh
source "tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh" \
|| { echo "Failed to source bazel_test_lib.sh" >&2; exit 1; }
# Recreate an empty bazelrc file under source root
export TMP_BAZELRC=.tmp.bazelrc
rm -f "${TMP_BAZELRC}"
touch "${TMP_BAZELRC}"
function cleanup {
# Remove all options in .tmp.bazelrc
echo "" > "${TMP_BAZELRC}"
}
trap cleanup EXIT
skip_test=0
release_build=0
for ARG in "$@"; do
if [[ "$ARG" == --skip_test ]]; then
skip_test=1
elif [[ "$ARG" == --enable_gcs_remote_cache ]]; then
set_gcs_remote_cache_options
elif [[ "$ARG" == --release_build ]]; then
release_build=1
fi
done
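# Example invocation (flags are optional and may be combined):
#   ./build_tf_windows.sh --release_build --skip_test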
if [[ "$release_build" != 1 ]]; then
# --define=override_eigen_strong_inline=true speeds up the compiling of conv_grad_ops_3d.cc and conv_ops_3d.cc
# by 20 minutes. See https://github.com/tensorflow/tensorflow/issues/10521
# Because this hurts the performance of TF, we don't enable it in release build.
echo "build --define=override_eigen_strong_inline=true" >> "${TMP_BAZELRC}"
fi
# The host and target platforms are the same in a Windows build, so we don't
# have to distinguish between them. This helps avoid building the same targets twice.
echo "build --distinct_host_configuration=false" >> "${TMP_BAZELRC}"
# Enable short object file path to avoid long path issue on Windows.
echo "startup --output_user_root=${TMPDIR}" >> "${TMP_BAZELRC}"
# Disable nvcc warnings to reduce log file size.
echo "build --copt=-nvcc_options=disable-warnings" >> "${TMP_BAZELRC}"
if ! grep -q "import %workspace%/${TMP_BAZELRC}" .bazelrc; then
echo "import %workspace%/${TMP_BAZELRC}" >> .bazelrc
fi
run_configure_for_gpu_build
bazel build --announce_rc --config=opt --nolegacy_whole_archive tensorflow/tools/pip_package:build_pip_package || exit $?
if [[ "$skip_test" == 1 ]]; then
exit 0
fi
# Create a python test directory to avoid package name conflict
PY_TEST_DIR="py_test_dir"
create_python_test_dir "${PY_TEST_DIR}"
./bazel-bin/tensorflow/tools/pip_package/build_pip_package "$PWD/${PY_TEST_DIR}"
# Running python tests on Windows needs pip package installed
PIP_NAME=$(ls ${PY_TEST_DIR}/tensorflow-*.whl)
reinstall_tensorflow_pip ${PIP_NAME}
# Define no_tensorflow_py_deps=true so that every py_test has no deps anymore,
# which will result in testing the system-installed tensorflow
# GPU tests are very flaky when running concurrently, so set local_test_jobs=1
bazel test --announce_rc --config=opt -k --test_output=errors \
--define=no_tensorflow_py_deps=true --test_lang_filters=py \
--test_tag_filters=-no_pip,-no_windows,-no_windows_gpu,-no_gpu,-no_pip_gpu,-no_oss \
--build_tag_filters=-no_pip,-no_windows,-no_windows_gpu,-no_gpu,-no_pip_gpu,-no_oss --build_tests_only \
--local_test_jobs=1 --test_timeout="300,450,1200,3600" \
--flaky_test_attempts=3 \
//${PY_TEST_DIR}/tensorflow/python/... \
//${PY_TEST_DIR}/tensorflow/contrib/...
|
meteorcloudy/tensorflow
|
tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh
|
Shell
|
apache-2.0
| 4,482 |
pkg_name=iproute2
pkg_origin=core
pkg_version=4.16.0
pkg_source="https://www.kernel.org/pub/linux/utils/net/$pkg_name/${pkg_name}-${pkg_version}.tar.xz"
pkg_shasum="0c5c24020fd7349fe25728c5edee9fb6a1bc8a38f08e23be5c57a6301e55ee0a"
pkg_maintainer="The Habitat Maintainers <[email protected]>"
pkg_description="Collection of utilities for controlling TCP/IP networking"
pkg_upstream_url="https://wiki.linuxfoundation.org/networking/iproute2"
pkg_license=('GPL-2.0')
pkg_bin_dirs=(sbin)
pkg_lib_dirs=(lib)
pkg_build_deps=(
lilian/bison
lilian/flex
lilian/gcc
lilian/iptables
lilian/m4
lilian/make
lilian/pkg-config
)
pkg_deps=(core/glibc)
source ../defaults.sh
do_build() {
SBINDIR="$pkg_prefix/sbin"
export SBINDIR
do_default_build
}
|
be-plans/be
|
iproute2/plan.sh
|
Shell
|
apache-2.0
| 753 |
#!/bin/bash
#==============================================================
# FILE NAME: run.sh
# FUNCTION: Call program
# VERSION: 1.0
# AUTHOR: S.Yatsuzuka
#
# Copyright (C) 2017 Shunjiro Yatsuzuka
#==============================================================
#======= Check Arguments =======
if [ $# -ne 1 -a $# -ne 2 ]; then
echo
echo "Usage: $0 <Parameter File> [Output File]"
echo
exit 1
fi
#======= Get Arguments =======
PARAM_FILE=$1
echo "PARAM_FILE = ${PARAM_FILE}"
if [ $# = 2 ]; then
OUTPUT_FILE=$2
echo "OUTPUT_FILE = ${OUTPUT_FILE}"
fi
#======= Check SDL Home =======
if [ ${#SDL_HOME} = 0 ]; then
echo
echo "ERROR: SDL_HOME isn't set as environment variable"
echo
exit 1
fi
#======= Get Parameter =======
source ${PARAM_FILE}
echo "CMD = ${SDL_CMD}"
#======= Call Program =======
if [ $# = 1 ]; then
${SDL_CMD}
elif [ $# = 2 ]; then
${SDL_CMD} &> ${OUTPUT_FILE}
fi
|
syatsuzuka/sample_deeplearning
|
bin/run.sh
|
Shell
|
apache-2.0
| 983 |
#!/usr/bin/env bash
set -e
usage="Usage: $0 GOLD_XML GOLD_TAB SYSTEMS_DIR OUT_DIR NUM_JOBS"
if [ "$#" -ne 5 ]; then
echo $usage
exit 1
fi
goldx=$1; shift # gold standard queries/mentions (XML)
goldt=$1; shift # gold standard link annotations (tab-separated)
sysdir=$1; shift # directory containing output from systems
outdir=$1; shift # directory to which results are written
jobs=$1; shift # number of jobs for parallel mode
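# Example invocation (paths and job count are hypothetical):
#   ./scripts/run_tac14_evaluation.sh gold.xml gold.tab systems/ out/ 8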
SCR=`dirname $0`
# CONVERT GOLD TO EVALUATION FORMAT
echo "INFO Converting gold to evaluation format.."
gtab=$outdir/gold.tab
cat $goldt \
| awk 'BEGIN{OFS="\t"}{print $1,$2,$3,"1.0"}' \
> $gtab
gold=$outdir/gold.combined.tsv
./nel prepare-tac -q $goldx $gtab \
> $gold
# CONVERT SYSTEMS TO EVALUATION FORMAT
echo "INFO Converting systems to evaluation format.."
ls $sysdir/*.tab \
| xargs -n 1 -P $jobs $SCR/run_tac14_prepare.sh $outdir
# EVALUATE
echo "INFO Evaluating systems.."
ls $outdir/*.combined.tsv \
| grep -v "gold\.combined\.tsv$" \
| xargs -n 1 -P $jobs $SCR/run_evaluate.sh $gold
# PREPARE SUMMARY REPORT
echo "INFO Preparing summary report.."
$SCR/run_tac14_report.sh $outdir
|
andychisholm/neleval
|
scripts/run_tac14_evaluation.sh
|
Shell
|
apache-2.0
| 1,162 |
#!/bin/bash
set -eu
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run python-novaclient test suite"
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment"
echo " -x, --stop Stop running tests after the first error or failure."
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -p, --pep8 Just run pep8"
echo " -P, --no-pep8 Don't run pep8"
echo " -c, --coverage Generate coverage report"
echo " -h, --help Print this usage message"
echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list"
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
exit
}
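# Example invocations (illustrative):
#   ./run_tests.sh -V -c     # run inside a virtualenv and generate coverage
#   ./run_tests.sh -N -p     # skip the virtualenv and only run pep8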
function process_option {
case "$1" in
-h|--help) usage;;
-V|--virtual-env) always_venv=1; never_venv=0;;
-N|--no-virtual-env) always_venv=0; never_venv=1;;
-s|--no-site-packages) no_site_packages=1;;
-f|--force) force=1;;
-p|--pep8) just_pep8=1;;
-P|--no-pep8) no_pep8=1;;
-c|--coverage) coverage=1;;
-*) noseopts="$noseopts $1";;
*) noseargs="$noseargs $1"
esac
}
venv=.venv
with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
force=0
no_site_packages=0
installvenvopts=
noseargs=
noseopts=
wrapper=""
just_pep8=0
no_pep8=0
coverage=0
for arg in "$@"; do
process_option $arg
done
# If enabled, tell nose to collect coverage data
if [ $coverage -eq 1 ]; then
noseopts="$noseopts --with-coverage --cover-package=novaclient"
fi
if [ $no_site_packages -eq 1 ]; then
installvenvopts="--no-site-packages"
fi
function run_tests {
# Just run the test suites in current environment
${wrapper} $NOSETESTS
# If we get some short import error right away, print the error log directly
RESULT=$?
return $RESULT
}
function run_pep8 {
echo "Running pep8 ..."
srcfiles="novaclient tests"
# Just run PEP8 in current environment
#
# NOTE(sirp): W602 (deprecated 3-arg raise) is being ignored for the
# following reasons:
#
# 1. It's needed to preserve traceback information when re-raising
# exceptions; this is needed b/c Eventlet will clear exceptions when
# switching contexts.
#
# 2. There doesn't appear to be an alternative, "pep8-tool" compatible way of doing this
# in Python 2 (in Python 3 `with_traceback` could be used).
#
# 3. Can find no corroborating evidence that this is deprecated in Python 2
# other than what the PEP8 tool claims. It is deprecated in Python 3, so,
# perhaps the mistake was thinking that the deprecation applied to Python 2
# as well.
pep8_opts="--ignore=E202,W602 --repeat"
${wrapper} pep8 ${pep8_opts} ${srcfiles}
}
NOSETESTS="nosetests $noseopts $noseargs"
if [ $never_venv -eq 0 ]
then
# Remove the virtual environment if --force used
if [ $force -eq 1 ]; then
echo "Cleaning virtualenv..."
rm -rf ${venv}
fi
if [ -e ${venv} ]; then
wrapper="${with_venv}"
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
python tools/install_venv.py $installvenvopts
wrapper="${with_venv}"
else
echo -e "No virtual environment found...create one? (Y/n) \c"
read use_ve
if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
# Install the virtualenv and run the test suite in it
python tools/install_venv.py $installvenvopts
wrapper=${with_venv}
fi
fi
fi
fi
# Delete old coverage data from previous runs
if [ $coverage -eq 1 ]; then
${wrapper} coverage erase
fi
if [ $just_pep8 -eq 1 ]; then
run_pep8
exit
fi
run_tests
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
# not when we're running tests individually. To handle this, we need to
# distinguish between options (noseopts), which begin with a '-', and
# arguments (noseargs).
if [ -z "$noseargs" ]; then
if [ $no_pep8 -eq 0 ]; then
run_pep8
fi
fi
if [ $coverage -eq 1 ]; then
echo "Generating coverage report in covhtml/"
${wrapper} coverage html -d covhtml -i
fi
|
rcbops/python-novaclient-buildpackage
|
run_tests.sh
|
Shell
|
apache-2.0
| 4,649 |
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -euxo pipefail
exec 1>&2
sudo apt-get update && sudo apt-get -y upgrade
sudo apt-get install -y csh wget curl vim git realpath tree htop lynx libsnappy1v5 \
lxc lvm2 xfsprogs pssh gcc g++ make gdb libboost-all-dev
pushd ~/var/yarn-ec2 > /dev/null
for vm in `sudo lxc-ls` ; do
sudo lxc-stop -k -n $vm || :
sudo lxc-destroy -f -n $vm
sleep 0.1
done
sudo service lxc stop
sudo service lxc-net stop
sudo rm -f /var/lib/misc/dnsmasq.lxcbr0.leases
sudo killall -9 java || :
sleep 0.1
sudo rm -rf /tmp/Jetty*
sudo rm -rf /tmp/hadoop*
sudo rm -rf /tmp/yarn*
sudo mkdir -p /opt/tarfiles
sudo rm -rf /opt/yarn*
sudo rm -rf /opt/thrift*
sudo rm -rf /opt/hadoop*
sudo rm -rf /opt/jdk*
HADOOP_TGZ=hadoop-2.2.0.tar.gz
HADOOP_URL=https://s3.amazonaws.com/ubuntu-ursus-packages/$HADOOP_TGZ
[ ! -e /opt/tarfiles/$HADOOP_TGZ -o ! -s /opt/tarfiles/$HADOOP_TGZ ] && \
sudo wget --no-check-certificate $HADOOP_URL -O /opt/tarfiles/$HADOOP_TGZ
sudo tar xzf /opt/tarfiles/$HADOOP_TGZ -C /opt
sudo chown -R root:root /opt/hadoop-2.2.0
sudo umount -l /usr/local/hd || :
sudo mkdir -p /usr/local/hd
sudo mount --bind -o ro /opt/hadoop-2.2.0 /usr/local/hd
TAPACK_TGZ=yarn-2.2.0-ta-pack-v1.tar.gz
TAPACK_URL=https://s3.amazonaws.com/ubuntu-ursus-packages/$TAPACK_TGZ
[ ! -e /opt/tarfiles/$TAPACK_TGZ -o ! -s /opt/tarfiles/$TAPACK_TGZ ] && \
sudo wget --no-check-certificate $TAPACK_URL -O /opt/tarfiles/$TAPACK_TGZ
sudo tar xzf /opt/tarfiles/$TAPACK_TGZ -C /opt
sudo chown -R root:root /opt/yarn-2.2.0-ta-pack-v1
sudo cp /opt/yarn-2.2.0-ta-pack-v1/jobexe/* /opt/hadoop-2.2.0/
sudo mv /opt/hadoop-2.2.0/share/hadoop/yarn/hadoop-yarn-server-resourcemanager-2.2.0.jar \
/opt/hadoop-2.2.0/share/hadoop/yarn/hadoop-yarn-server-resourcemanager-2.2.0.jar.origin
sudo cp /opt/yarn-2.2.0-ta-pack-v1/hadoop-yarn-server-resourcemanager-2.2.0.jar /opt/hadoop-2.2.0/share/hadoop/yarn/
sudo cp /opt/yarn-2.2.0-ta-pack-v1/hadoop-yarn-applications-mpirunner-2.2.0.jar /opt/hadoop-2.2.0/
sudo cp /opt/yarn-2.2.0-ta-pack-v1/hadoop-yarn-applications-gpu-2.2.0.jar /opt/hadoop-2.2.0/
SUNJDK_TGZ=jdk-8u121-linux-x64.tar.gz
SUNJDK_URL=https://s3.amazonaws.com/ubuntu-ursus-packages/$SUNJDK_TGZ
[ ! -e /opt/tarfiles/$SUNJDK_TGZ -o ! -s /opt/tarfiles/$SUNJDK_TGZ ] && \
sudo wget --no-check-certificate $SUNJDK_URL -O /opt/tarfiles/$SUNJDK_TGZ
sudo tar xzf /opt/tarfiles/$SUNJDK_TGZ -C /opt
sudo chown -R root:root /opt/jdk1.8.0_121
sudo umount -l /usr/lib/jvm/sunjdk || :
sudo mkdir -p /usr/lib/jvm/sunjdk
sudo mount --bind -o ro /opt/jdk1.8.0_121 /usr/lib/jvm/sunjdk
THRIFT_TGZ=thrift-0.9.1.tar.gz
THRIFT_URL=https://s3.amazonaws.com/ubuntu-ursus-packages/$THRIFT_TGZ
[ ! -e /opt/tarfiles/$THRIFT_TGZ -o ! -s /opt/tarfiles/$THRIFT_TGZ ] && \
sudo wget --no-check-certificate $THRIFT_URL -O /opt/tarfiles/$THRIFT_TGZ
sudo tar xzf /opt/tarfiles/$THRIFT_TGZ -C /opt
sudo chown -R root:root /opt/thrift-0.9.1
sudo umount -l /usr/local/thrift || :
sudo mkdir -p /usr/local/thrift
sudo mount --bind -o ro /opt/thrift-0.9.1 /usr/local/thrift
sudo cp /opt/thrift-0.9.1/share/*.jar /opt/hadoop-2.2.0/share/hadoop/yarn/lib/
cat <<EOF | sudo tee /etc/ld.so.conf.d/libthrift.conf
/usr/local/thrift/lib
EOF
sudo ldconfig
sudo rm -rf /srv/hdfs*
sudo rm -rf /srv/yarn*
sudo mkdir /srv/hdfs
sudo ln -s /usr/local/hd/bin /srv/hdfs/
sudo ln -s /usr/local/hd/lib /srv/hdfs/
sudo ln -s /usr/local/hd/libexec /srv/hdfs/
sudo ln -s /usr/local/hd/sbin /srv/hdfs/
sudo ln -s /usr/local/hd/share /srv/hdfs/
sudo mkdir /srv/hdfs/logs
sudo mkdir /srv/hdfs/conf
sudo ln -s /usr/local/hd/etc/hadoop/* /srv/hdfs/conf/
sudo rm -f /srv/hdfs/conf/core-site.xml
sudo rm -f /srv/hdfs/conf/hdfs-site.xml
sudo rm -f /srv/hdfs/conf/container*
sudo rm -f /srv/hdfs/conf/httpfs*
sudo rm -f /srv/hdfs/conf/mapred*
sudo rm -f /srv/hdfs/conf/yarn*
sudo rm -f /srv/hdfs/conf/*-scheduler.xml
sudo rm -f /srv/hdfs/conf/*example
sudo rm -f /srv/hdfs/conf/*cmd
sudo rm -f /srv/hdfs/conf/slaves
cat hosts | fgrep r | fgrep -v h | cut -d' ' -f2 | sudo tee /srv/hdfs/conf/slaves
echo "r0" | sudo tee /srv/hdfs/conf/boss
sudo cp ~/share/yarn-ec2/hd/conf/core-site.xml /srv/hdfs/conf/
sudo cp ~/share/yarn-ec2/hd/conf/hdfs-site.xml /srv/hdfs/conf/
sudo mkdir /srv/yarn
sudo ln -s /usr/local/hd/bin /srv/yarn/
sudo ln -s /usr/local/hd/lib /srv/yarn/
sudo ln -s /usr/local/hd/libexec /srv/yarn/
sudo ln -s /usr/local/hd/sbin /srv/yarn/
sudo ln -s /usr/local/hd/share /srv/yarn/
sudo ln -s /usr/local/hd/hadoop-yarn-applications-* /srv/yarn/
sudo ln -s /usr/local/hd/bt* /srv/yarn/
sudo ln -s /usr/local/hd/cg* /srv/yarn/
sudo ln -s /usr/local/hd/ft* /srv/yarn/
sudo ln -s /usr/local/hd/sp* /srv/yarn/
sudo mkdir /srv/yarn/logs
sudo mkdir /srv/yarn/conf
sudo ln -s /usr/local/hd/etc/hadoop/* /srv/yarn/conf/
sudo rm -f /srv/yarn/conf/core-site.xml
sudo rm -f /srv/yarn/conf/yarn-site.xml
sudo rm -f /srv/yarn/conf/hdfs*
sudo rm -f /srv/yarn/conf/httpfs*
sudo rm -f /srv/yarn/conf/mapred*
sudo rm -f /srv/yarn/conf/*example
sudo rm -f /srv/yarn/conf/*cmd
sudo rm -f /srv/yarn/conf/slaves
cat hosts | fgrep r | fgrep h | cut -d' ' -f2 | sudo tee /srv/yarn/conf/slaves
echo "r0" | sudo tee /srv/yarn/conf/boss
sudo cp ~/share/yarn-ec2/hd/conf/core-site.xml /srv/yarn/conf/
cat <<EOF | sudo tee /etc/environment
PATH="/usr/local/sbin:/usr/local/bin:/usr/lib/jvm/sunjdk/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games"
JAVA_HOME="/usr/lib/jvm/sunjdk"
EOF
PRIMARY_IP=`curl http://169.254.169.254/latest/meta-data/local-ipv4`
echo "$PRIMARY_IP" > my_primary_ip
MAC=`curl http://169.254.169.254/latest/meta-data/mac`
CIDR=`curl http://169.254.169.254/latest/meta-data/network/interfaces/macs/$MAC/subnet-ipv4-cidr-block`
echo "$CIDR" > my_cidr
PRIVATE_IPS=`curl http://169.254.169.254/latest/meta-data/network/interfaces/macs/$MAC/local-ipv4s`
echo "$PRIVATE_IPS" > my_ips
OFFSET=`cat all-nodes | grep -n ^$PRIMARY_IP$ | cut -d: -f1`
ID=$(( OFFSET - 1 ))
echo "$ID" > my_id
MASK=`echo $CIDR | cut -d/ -f2`
DEV=`ls -1 /sys/class/net/ | fgrep -v lxc | fgrep -v lo | head -1`
echo "$DEV" > my_nic
sudo ip link set dev $DEV mtu 1500
sudo ip addr show dev $DEV
sudo ip addr flush secondary dev $DEV
for ipv4 in `cat my_ips` ; do
if [ x"$ipv4" != x"$PRIMARY_IP" ] ; then
sudo ip addr add $ipv4/$MASK brd + dev $DEV
fi
done
sudo ip addr show dev $DEV
cat <<EOF | sudo tee /etc/hosts
127.0.0.1 localhost
4.4.4.4 mashiro
8.8.8.8 ibuki
# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
EOF
cat hosts | sudo tee -a /etc/hosts
HOSTNAME=`echo r"$ID"`
echo $HOSTNAME | sudo tee /etc/hostname
sudo hostname $HOSTNAME
cat <<EOF | sudo tee /etc/ssh/ssh_config
Host *
PasswordAuthentication no
HashKnownHosts no
UserKnownHostsFile /dev/null
StrictHostKeyChecking no
GSSAPIAuthentication yes
GSSAPIDelegateCredentials no
SendEnv LANG LC_*
EOF
function try_fgrep() {
fgrep $@ || :
}
XFS_MOUNT_OPTS="defaults,noatime,nodiratime,allocsize=8m"
DISKS=`lsblk -ln | fgrep disk | cut -d' ' -f1 | try_fgrep -v da`
echo -n "$DISKS" | awk '{print "/dev/" $0}' > my_disks
NUM_DISKS=`cat my_disks | wc -l`
LV_NAME="lxclv0"
VG_NAME="lxcvg0"
LV="/dev/$VG_NAME/$LV_NAME"
VG="/dev/$VG_NAME"
sudo lsof | grep /mnt || :
sudo fuser -k /mnt/*log || :
sudo lsblk
sudo umount -f /mnt || :
if [ -e $LV ] ; then
sudo umount -f $LV || :
sudo lvremove -f $LV
fi
if [ -e $VG ] ; then
sudo vgremove -f $VG
fi
if [ $NUM_DISKS -gt 0 ] ; then
for dev in `cat my_disks` ; do
sudo pvcreate -ff -y $dev
done
sudo vgcreate -y $VG_NAME `cat my_disks | paste -sd ' ' -`
sudo lvcreate -y -Wy -Zy -l 100%FREE \
-n $LV_NAME $VG_NAME
sleep 0.1
if [ -e $LV ] ; then
sudo mkfs.xfs -f $LV
sudo mount -o $XFS_MOUNT_OPTS $LV /mnt
fi
fi
sudo rm -rf /mnt/*
sudo mkdir /mnt/hdscratch
sudo lsblk
sudo df -h
NUM_CPUS=`cat /proc/cpuinfo | fgrep proc | wc -l`
echo "$NUM_CPUS" > my_ncpus
sudo cp -f ~/share/yarn-ec2/lxc/share/lxc/templates/* /usr/share/lxc/templates/
sudo cp -f ~/share/yarn-ec2/lxc/etc/default/* /etc/default/
sudo cp -f ~/share/yarn-ec2/lxc/etc/lxc/* /etc/lxc/
function setup_vm_iptables() {
### @param rack_id, host_id ###
VM_NAME=`echo r"$1"h"$2"`
IFCONF="/mnt/$VM_NAME/rootfs/etc/network/interfaces"
echo "post-up iptables -t nat -F" | sudo tee -a $IFCONF
echo "post-up tc qdisc add dev eth0 root handle 1: htb default 1" \
| sudo tee -a $IFCONF
echo "post-up tc class add dev eth0 parent 1: classid 1:1 htb rate 1250mbit ceil 1250mbit" \
| sudo tee -a $IFCONF
cat hosts | try_fgrep h | while read ln ; do
PEER_NAME=`echo $ln | cut -d' ' -f2`
PEER_RACK=`echo $PEER_NAME | cut -dr -f2 | cut -dh -f1`
PEER_HOST=`echo $PEER_NAME | cut -dr -f2 | cut -dh -f2`
if [ $1 -ne $PEER_RACK ] ; then
PEER_ID=$(( PEER_HOST + PEER_RACK * 10 + 100))
PEER_IP=`echo $ln | cut -d' ' -f1`
echo -n "post-up iptables -t nat -A OUTPUT " | sudo tee -a $IFCONF
echo "-d 192.168.1.$PEER_ID -j DNAT --to $PEER_IP" | sudo tee -a $IFCONF
echo -n "post-up iptables -t nat -A INPUT " | sudo tee -a $IFCONF
echo "-s $PEER_IP -j SNAT --to 192.168.1.$PEER_ID" | sudo tee -a $IFCONF
fi
done
}
function create_vm() {
### @param rack_id, host_id, ip, mem, ncpus, vmem, nvcores ###
VM_NAME=`echo r"$1"h"$2"`
sudo lxc-create -n $VM_NAME -t debian -- \
--release wheezy ### --packages ??? ###
sudo cp -r ~/.ssh /mnt/$VM_NAME/rootfs/root/
sudo chown -R root:root /mnt/$VM_NAME/rootfs/root/.ssh
sudo cp -f /etc/ssh/ssh_config /mnt/$VM_NAME/rootfs/etc/ssh/
sudo cp -f /etc/profile /mnt/$VM_NAME/rootfs/etc/
sudo cp -r /srv/yarn /srv/yarn-$VM_NAME
sudo rm -f /srv/yarn-$VM_NAME/conf/yarn-site.xml
sudo cp ~/share/yarn-ec2/node-mngr/conf/yarn-site.xml /srv/yarn-$VM_NAME/conf/
sudo sed -i "s/yarn.nodemanager.hostname.value/$VM_NAME/" /srv/yarn-$VM_NAME/conf/yarn-site.xml
sudo sed -i "s/yarn.nodemanager.resource.cpu-vcores.value/$7/" /srv/yarn-$VM_NAME/conf/yarn-site.xml
sudo sed -i "s/yarn.nodemanager.resource.memory-mb.value/$6/" /srv/yarn-$VM_NAME/conf/yarn-site.xml
echo "lxc.mount.entry = /srv/yarn-$VM_NAME srv/yarn none rw,bind,create=dir" | \
sudo tee -a /mnt/$VM_NAME/config
sudo sed -i "/lxc.network.ipv4 =/c lxc.network.ipv4 = $3" \
/mnt/$VM_NAME/config
sudo sed -i "/lxc.cgroup.memory.max_usage_in_bytes =/c lxc.cgroup.memory.max_usage_in_bytes = ${4}M" \
/mnt/$VM_NAME/config
sudo sed -i "/lxc.cgroup.memory.limit_in_bytes =/c lxc.cgroup.memory.limit_in_bytes = ${4}M" \
/mnt/$VM_NAME/config
core_begin=$(( $2 * $5 ))
core_end=$(( core_begin + $5 - 1 ))
VM_CPUS=`echo "$core_begin"-"$core_end"`
sudo sed -i "/lxc.cgroup.cpuset.cpus =/c lxc.cgroup.cpuset.cpus = $VM_CPUS" \
/mnt/$VM_NAME/config
cat vmhosts | sudo tee -a /mnt/$VM_NAME/rootfs/etc/hosts
setup_vm_iptables $1 $2
}
RACK_ID="$ID"
if [ $RACK_ID -eq 0 ] ; then
sudo cp ~/share/yarn-ec2/resource-mngr/conf/yarn-site.xml \
/srv/yarn/conf/yarn-site.xml
sudo sed -i "s/yarn.resourcemanager.scheduler.class.value/`cat ~/etc/yarn-scheduler.txt`/" \
/srv/yarn/conf/yarn-site.xml
WORKER_LIST=`cat hosts | try_fgrep r | try_fgrep h | try_fgrep -v r0`
WORKERS=`echo -n "$WORKER_LIST" | cut -d' ' -f2 | tr '\n' ','`
sudo sed -i "s/yarn.tetris.hostnames.value/${WORKERS:0:-1}/" \
/srv/yarn/conf/yarn-site.xml
fi
HOST_ID=0
for ip in `cat rack-$ID/vmips` ; do
NODE_ID=$(( HOST_ID + RACK_ID * 10 + 100))
sudo sed -i "s/$ip /192.168.1.$NODE_ID /" /etc/hosts
create_vm $RACK_ID $HOST_ID "192.168.1.$NODE_ID/24 192.168.1.255" \
"`cat rack-$ID/vmmem`" "`cat rack-$ID/vmncpus`" \
"`cat rack-$ID/vmvmem`" "`cat rack-$ID/vmnvcpus`"
HOST_ID=$(( HOST_ID + 1 ))
done
sudo service lxc-net start
sudo iptables -t nat -F ### will use our own rules ###
sudo iptables -t nat -L -n
sudo service lxc start
sudo lxc-ls -f
sudo cp -f ~/share/yarn-ec2/exec/* /usr/local/sbin/
sudo cp -f ~/share/yarn-ec2/hd/exec/* /usr/local/sbin/
sudo cp -f ~/share/yarn-ec2/resource-mngr/exec/* /usr/local/sbin/
sudo cp -f ~/share/yarn-ec2/node-mngr/exec/* /usr/local/sbin/
sudo mkdir -p ~/lib
sudo mkdir -p ~/bin
sudo mkdir -p ~/src
popd > /dev/null
exit 0
|
zhengqmark/yarn-ec2
|
setup-slave.sh
|
Shell
|
apache-2.0
| 13,317 |
# -----------------------------------------------------------------------------
#
# Package : array.prototype.flat
# Version : 1.2.4
# Source repo : https://github.com/es-shims/Array.prototype.flat
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=array.prototype.flat
PACKAGE_VERSION=1.2.4
PACKAGE_URL=https://github.com/es-shims/Array.prototype.flat
yum -y update && yum install -y yum-utils nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git gcc gcc-c++ libffi libffi-devel jq make cmake
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/appstream/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/baseos/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/7Server/ppc64le/optional/
yum install -y firefox liberation-fonts xdg-utils && npm install n -g && n latest && npm install -g npm@latest && export PATH="$PATH" && npm install --global yarn grunt-bump xo testem acorn
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`
HOME_DIR=`pwd`
if ! git clone $PACKAGE_URL $PACKAGE_NAME; then
echo "------------------$PACKAGE_NAME:clone_fails---------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME" > /home/tester/output/clone_fails
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Clone_Fails" > /home/tester/output/version_tracker
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
git checkout $PACKAGE_VERSION
PACKAGE_VERSION=$(jq -r ".version" package.json)
# run the test command from test.sh
if ! (npm install && npm audit fix && npm audit fix --force); then
echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
if ! npm test; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails"
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success"
exit 0
fi
|
ppc64le/build-scripts
|
a/array.prototype.flat/array.prototype.flat_rhel_8.3.sh
|
Shell
|
apache-2.0
| 3,100 |
#! /bin/bash
#
# run.sh
# Copyright (C) 2019 damian <damian@damian-laptop>
#
# Distributed under terms of the MIT license.
#
testcafe chrome test1.js
|
xmementoit/practiseSamples
|
testcafe/run.sh
|
Shell
|
apache-2.0
| 153 |
#!/bin/bash
# ----------------------------------------------------------------------------
#
# Package : default
# Version : 0.1.1
# Source repo : https://github.com/safareli/default.git
# Tested on : ubuntu_18.04
# Script License: Apache License, Version 2 or later
# Maintainer : Atul Sowani <[email protected]>
#
# Disclaimer: This script has been tested in non-root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
# Install dependencies.
sudo apt-get update -y
sudo apt-get install -y git nodejs npm
# Clone and build source.
git clone https://github.com/safareli/default.git
cd default
npm install
npm test
|
ppc64le/build-scripts
|
d/default/default_ubuntu_18.04.sh
|
Shell
|
apache-2.0
| 913 |
export SPARK_HOME=/opt/spark
export PATH=$PATH:/$SPARK_HOME/bin
export NUM_CORES=$3
export MEMORY=$4
echo "HELLO WORLD!"
csv_dataset=$1
w2v_model=$2
w2v_jar_path=/root/imr-code/imr_workflow_spark/operators/imr_w2v_2.11-1.0.jar
echo "PARAMS: ${w2v_jar_path} ${w2v_model}"
spark-submit --executor-memory ${MEMORY%.*}M --driver-memory 1G --total-executor-cores ${NUM_CORES%.*} $w2v_jar_path sm $csv_dataset hdfs://master:9000$w2v_model
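# Example invocation (paths and sizing are hypothetical):
#   ./w2v_train_spark.sh /data/corpus.csv /models/w2v 8 4096
# trains on /data/corpus.csv with 8 executor cores and ~4096 MB of executor
# memory, writing the model to hdfs://master:9000/models/w2v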
|
project-asap/IReS-Platform
|
asap-platform/asap-server/asapLibrary/operators/w2v_train_spark/w2v_train_spark.sh
|
Shell
|
apache-2.0
| 440 |
# Copyright (C) 2014 Internet Systems Consortium, Inc. ("ISC")
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
rm -f ns1/example.db
rm -f nslookup.out*
|
execunix/vinos
|
external/bsd/bind/dist/bin/tests/system/nslookup/clean.sh
|
Shell
|
apache-2.0
| 813 |
#!/bin/bash
set -e
pushd web
./gradlew clean assemble
VERSION=`cat version-number`
popd
mkdir build-output/libs && cp web/build/libs/$ARTIFACT_ID-$VERSION.jar build-output/libs/.
cp web/build/manifest.yml build-output/.
|
pivotal-bank/web-ui
|
ci/tasks/build-web/task.sh
|
Shell
|
apache-2.0
| 231 |