code
stringlengths 2
1.05M
| repo_name
stringlengths 5
110
| path
stringlengths 3
922
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 2
1.05M
|
---|---|---|---|---|---|
#!/bin/bash
# Demonstrates bash integer arithmetic with the `let` builtin.
# `a` is unset here; arithmetic evaluation treats it as 0, so the result is 3.
let "a += 3"
# Quote the expansion — unquoted $a would be subject to word-splitting/globbing.
echo "$a"
|
cxsjabc/basic
|
bash/_basic/let.sh
|
Shell
|
agpl-3.0
| 34 |
# Integration test for nix-env user environments: querying, installing,
# upgrading, uninstalling, rollback, generations, priorities and --set.
# Helpers (clearStore, clearProfiles) and variables ($profiles, $TEST_ROOT)
# come from common.sh — this script is not runnable standalone.
source common.sh
clearStore
clearProfiles
set -x
# Query installed: should be empty.
test "$(nix-env -p $profiles/test -q '*' | wc -l)" -eq 0
export HOME=$TEST_ROOT/home
mkdir -p $HOME
nix-env --switch-profile $profiles/test
# Query available: should contain several.
test "$(nix-env -f ./user-envs.nix -qa '*' | wc -l)" -eq 6
outPath10=$(nix-env -f ./user-envs.nix -qa --out-path --no-name '*' | grep foo-1.0)
drvPath10=$(nix-env -f ./user-envs.nix -qa --drv-path --no-name '*' | grep foo-1.0)
# Both the output path and the derivation path of foo-1.0 must be resolvable.
[ -n "$outPath10" -a -n "$drvPath10" ]
# Query descriptions.
nix-env -f ./user-envs.nix -qa '*' --description | grep -q silly
rm -f $HOME/.nix-defexpr
ln -s $(pwd)/user-envs.nix $HOME/.nix-defexpr
nix-env -qa '*' --description | grep -q silly
# Install "foo-1.0".
nix-env -i foo-1.0
# Query installed: should contain foo-1.0 now (which should be
# executable).
test "$(nix-env -q '*' | wc -l)" -eq 1
nix-env -q '*' | grep -q foo-1.0
test "$($profiles/test/bin/foo)" = "foo-1.0"
# Test nix-env -qc to compare installed against available packages, and vice versa.
nix-env -qc '*' | grep -q '< 2.0'
nix-env -qac '*' | grep -q '> 1.0'
# Test the -b flag to filter out source-only packages.
[ "$(nix-env -qab | wc -l)" -eq 1 ]
# Test the -s flag to get package status.
nix-env -qas | grep -q 'IP- foo-1.0'
nix-env -qas | grep -q -- '--- bar-0.1'
# Disable foo.
nix-env --set-flag active false foo
! [ -e "$profiles/test/bin/foo" ]
# Enable foo.
nix-env --set-flag active true foo
[ -e "$profiles/test/bin/foo" ]
# Store the path of foo-1.0.
outPath10_=$(nix-env -q --out-path --no-name '*' | grep foo-1.0)
echo "foo-1.0 = $outPath10"
# The installed out-path must match the one queried from the expression above.
[ "$outPath10" = "$outPath10_" ]
# Install "foo-2.0pre1": should remove foo-1.0.
nix-env -i foo-2.0pre1
# Query installed: should contain foo-2.0pre1 now.
test "$(nix-env -q '*' | wc -l)" -eq 1
nix-env -q '*' | grep -q foo-2.0pre1
test "$($profiles/test/bin/foo)" = "foo-2.0pre1"
# Upgrade "foo": should install foo-2.0.
NIX_PATH=nixpkgs=./user-envs.nix:$NIX_PATH nix-env -f '<nixpkgs>' -u foo
# Query installed: should contain foo-2.0 now.
test "$(nix-env -q '*' | wc -l)" -eq 1
nix-env -q '*' | grep -q foo-2.0
test "$($profiles/test/bin/foo)" = "foo-2.0"
# Store the path of foo-2.0.
outPath20=$(nix-env -q --out-path --no-name '*' | grep foo-2.0)
test -n "$outPath20"
# Install bar-0.1, uninstall foo.
nix-env -i bar-0.1
nix-env -e foo
# Query installed: should only contain bar-0.1 now.
if nix-env -q '*' | grep -q foo; then false; fi
nix-env -q '*' | grep -q bar
# Rollback: should bring "foo" back.
oldGen="$(nix-store -q --resolve $profiles/test)"
nix-env --rollback
# The profile must now resolve to a different generation than before.
[ "$(nix-store -q --resolve $profiles/test)" != "$oldGen" ]
nix-env -q '*' | grep -q foo-2.0
nix-env -q '*' | grep -q bar
# Rollback again: should remove "bar".
nix-env --rollback
nix-env -q '*' | grep -q foo-2.0
if nix-env -q '*' | grep -q bar; then false; fi
# Count generations.
nix-env --list-generations
test "$(nix-env --list-generations | wc -l)" -eq 7
# Switch to a specified generation.
nix-env --switch-generation 7
[ "$(nix-store -q --resolve $profiles/test)" = "$oldGen" ]
# Install foo-1.0, now using its store path.
nix-env -i "$outPath10"
nix-env -q '*' | grep -q foo-1.0
nix-store -qR $profiles/test | grep "$outPath10"
nix-store -q --referrers-closure $profiles/test | grep "$(nix-store -q --resolve $profiles/test)"
[ "$(nix-store -q --deriver "$outPath10")" = $drvPath10 ]
# Uninstall foo-1.0, using a symlink to its store path.
ln -sfn $outPath10/bin/foo $TEST_ROOT/symlink
nix-env -e $TEST_ROOT/symlink
if nix-env -q '*' | grep -q foo; then false; fi
! nix-store -qR $profiles/test | grep "$outPath10"
# Install foo-1.0, now using a symlink to its store path.
nix-env -i $TEST_ROOT/symlink
nix-env -q '*' | grep -q foo
# Delete all old generations.
nix-env --delete-generations old
# Run the garbage collector. This should get rid of foo-2.0 but not
# foo-1.0.
nix-collect-garbage
test -e "$outPath10"
! [ -e "$outPath20" ]
# Uninstall everything
nix-env -e '*'
test "$(nix-env -q '*' | wc -l)" -eq 0
# Installing "foo" should only install the newest foo.
nix-env -i foo
test "$(nix-env -q '*' | grep foo- | wc -l)" -eq 1
nix-env -q '*' | grep -q foo-2.0
# On the other hand, this should install both (and should fail due to
# a collision).
nix-env -e '*'
! nix-env -i foo-1.0 foo-2.0
# Installing "*" should install one foo and one bar.
nix-env -e '*'
nix-env -i '*'
test "$(nix-env -q '*' | wc -l)" -eq 2
nix-env -q '*' | grep -q foo-2.0
nix-env -q '*' | grep -q bar-0.1.1
# Test priorities: foo-0.1 has a lower priority than foo-1.0, so it
# should be possible to install both without a collision. Also test
# ‘--set-flag priority’ to manually override the declared priorities.
nix-env -e '*'
nix-env -i foo-0.1 foo-1.0
[ "$($profiles/test/bin/foo)" = "foo-1.0" ]
nix-env --set-flag priority 1 foo-0.1
[ "$($profiles/test/bin/foo)" = "foo-0.1" ]
# Test nix-env --set.
nix-env --set $outPath10
[ "$(nix-store -q --resolve $profiles/test)" = $outPath10 ]
nix-env --set $drvPath10
[ "$(nix-store -q --resolve $profiles/test)" = $outPath10 ]
|
Mathnerd314/nix
|
tests/user-envs.sh
|
Shell
|
lgpl-2.1
| 5,105 |
#!/bin/bash
# Download, build and install network-manager-vpnc 0.9.10.0 for the ALPS
# build system; registers the package in the installed-package list.
set -e
set +h
. /etc/alps/alps.conf
. /var/lib/alps/functions
NAME="network-manager-vpnc"
VERSION="0.9.10.0"
#REQ:vpnc
URL=http://archive.ubuntu.com/ubuntu/pool/universe/n/network-manager-vpnc/network-manager-vpnc_0.9.10.0.orig.tar.xz
cd "$SOURCE_DIR"
wget -nc "$URL"
# Last path component of the URL — replaces the old `rev | cut -d/ -f1 | rev`
# pipeline with a plain parameter expansion (no subprocesses).
TARBALL=${URL##*/}
# Top-level directory inside the tarball (assumes a single root directory).
DIRECTORY=$(tar tf "$TARBALL" | cut -d/ -f1 | uniq)
tar -xf "$TARBALL"
cd "$DIRECTORY"
./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --with-gnome &&
make "-j$(nproc)"
sudo make install
cd "$SOURCE_DIR"
cleanup "$NAME" "$DIRECTORY"
register_installed "$NAME" "$VERSION" "$INSTALLED_LIST"
|
FluidIdeas/parsers
|
blfs-resources/extrascripts/network-manager-vpnc.sh
|
Shell
|
lgpl-2.1
| 629 |
#!/bin/sh
# Vagrant host setup: render the fleet config with this host's IP and
# prepare a local etcd overlay plus its systemd units.
## Setup fleet
FLEET_CONF_TEMPLATE="/opt/data/vagrant/fleet.conf.core"
# $() instead of backticks; quote in case the helper output contains spaces.
IP=$(/opt/data/tools/resolve-ip.sh)
sed -e "s/##IP##/$IP/g" "$FLEET_CONF_TEMPLATE" > /media/state/etc/fleet.conf
## Setup etcd
mkdir -p /media/state/overlays/var/lib/etcd-local
chown core:core /media/state/overlays/var/lib/etcd-local
cp /opt/data/systemd/*/*local.service /media/state/units/
chown core:core /media/state/units/*.service
chmod 644 /media/state/units/*.service
systemctl daemon-reload
systemctl restart local-enable.service
|
nuxeo/nuxeo.io
|
vagrant/setup_host.sh
|
Shell
|
lgpl-2.1
| 522 |
#!/bin/bash
#
# Copyright (C) 2019 Matthias Clasen
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
# TAP-style test of `flatpak config` list/set/get/unset round-trips.
# assert_file_has_content and ok come from libtest.sh.
set -euo pipefail
. $(dirname $0)/libtest.sh
# This test looks for specific localized strings.
export LC_ALL=C
echo "1..5"
${FLATPAK} config --list > list_out
assert_file_has_content list_out "^languages:"
ok "config list"
${FLATPAK} config --set languages "de;fr"
${FLATPAK} config --get languages > get_out
assert_file_has_content get_out "^de;fr"
ok "config set"
${FLATPAK} config --set languages "*"
${FLATPAK} config --get languages > get_out
assert_file_has_content get_out "*"
ok "config languages *"
${FLATPAK} config --set languages "all"
${FLATPAK} config --get languages > get_out
assert_file_has_content get_out "all"
# BUG FIX: this TAP message previously duplicated "config languages *",
# making the two sub-tests indistinguishable in the test output.
ok "config languages all"
${FLATPAK} config --unset languages
${FLATPAK} config --get languages > get_out
assert_file_has_content get_out "^[*]unset[*]"
ok "config unset"
|
flatpak/flatpak
|
tests/test-config.sh
|
Shell
|
lgpl-2.1
| 1,590 |
#!/bin/bash -
#===============================================================================
#
# FILE: .travis-ci.sh
#
# USAGE: ./.travis-ci.sh
#
# DESCRIPTION: Generate the ml file and compile (CI driver for the
# ocaml-cordova-plugin-intent project).
#
# CREATED: 2017
# REVISION: 1.0
#===============================================================================
set -o nounset                              # Treat unset variables as an error
URL=https://github.com/krzischp/ocaml-cordova-plugin-intent.git
PLUGIN_PIN=cordova-plugin-intent
export OPAMYES=1
# Reuse a cached opam root when present; otherwise initialise a fresh one.
if [ -f "$HOME/.opam/config" ]; then
opam update
opam upgrade
else
opam init
fi
# BUG FIX: under `set -o nounset`, expanding OPAM_SWITCH when the CI matrix
# does not define it aborted the script; default to the empty string instead.
if [ -n "${OPAM_SWITCH:-}" ]; then
opam switch "${OPAM_SWITCH}"
fi
# $() instead of backticks.
eval $(opam config env)
export OCAMLRUNPARAM=b
opam pin add ocaml-js-stdlib https://github.com/dannywillems/ocaml-js-stdlib.git
# Test for make and make clean
make && make clean
# Test for make install and make remove
make install && make remove && make clean
|
krzischp/ocaml-cordova-plugin-intent
|
.travis-ci.sh
|
Shell
|
lgpl-3.0
| 972 |
#!/bin/sh
# Initialise the ha_hmp4030 module via pyrame's chkpyr2 dispatcher.
# Usage: init.sh conf_string [extra args...]
. /opt/pyrame/ports.sh
if test $# -lt 1
then
echo "usage $0 conf_string"
exit 1
fi
# BUG FIX: "$@" (quoted) — the old unquoted $@ re-split any argument
# containing whitespace before it reached chkpyr2.py.
chkpyr2.py localhost $HA_HMP4030_PORT init_ha_hmp4030 "$@"
|
sbinet-staging/pyrame
|
ps/cmd_ha_hmp4030/init.sh
|
Shell
|
lgpl-3.0
| 154 |
#!/bin/bash
# Build the casadi cmake/pkg-config demo from a clean build tree and run it.
# BUG FIX: without `set -e` a failed cmake/make still fell through and
# executed (a possibly stale) build/casadi_demo.
set -e
#export PKG_CONFIG_PATH=..
rm -rf build
mkdir build
pushd build
cmake ..
make VERBOSE=1
popd
# Only reached when configure and build both succeeded.
build/casadi_demo
|
casadi/casadi
|
docs/examples/cplusplus/cmake_pkgconfig/run.sh
|
Shell
|
lgpl-3.0
| 126 |
#!/bin/bash
# Bootstrap the autotools build system (aclocal/libtoolize/automake/autoreconf).
# BUG FIX: without `set -e` a failure in one tool cascaded into confusing
# errors from the next; abort on the first failure instead.
set -e
ACLOCAL=aclocal
AUTOCONF=autoconf
AUTOMAKE=automake
AUTORECONF=autoreconf
# macOS installs GNU libtool under a 'g' prefix to avoid clashing with the
# unrelated Apple /usr/bin/libtool.
if [ "$(uname -s)" = "Darwin" ]; then
LIBTOOL=glibtool
LIBTOOLIZE=glibtoolize
else
LIBTOOL=libtool
LIBTOOLIZE=libtoolize
fi
# NOTE(review): LIBTOOL is assigned but never invoked below — kept for parity
# with LIBTOOLIZE; confirm whether any sourcing script relies on it.
mkdir -p aclocal.d
mkdir -p config.aux
$ACLOCAL
$LIBTOOLIZE
$AUTOMAKE --add-missing
$AUTORECONF
|
andydude/harcurl
|
autogen.sh
|
Shell
|
lgpl-3.0
| 329 |
#!/bin/bash
# Generate the Great Britain maps (England, Scotland, Wales) with map-creator.
# BUG FIX: if the cd failed, the relative ../map-creator.sh invocations below
# would run from whatever the caller's cwd happened to be; bail out instead.
cd "$(dirname "$0")" || exit 1
../map-creator.sh europe/great-britain/england ram
../map-creator.sh europe/great-britain/scotland ram
../map-creator.sh europe/great-britain/wales ram
|
mapsforge/mapsforge-creator
|
v3/europe-great-britain.sh
|
Shell
|
lgpl-3.0
| 187 |
#!/bin/bash
# Regenerate the autotools build and configure an optimized debug C++11 build
# with pedantic libstdc++ debug checks. Extra arguments are forwarded to
# ./configure.
set -e
_common_pp="-D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC"
_c_pp="-std=c99"
_cxx_pp="-std=c++11"
_optimize="-O2 -g3"
mkdir -p m4
autoreconf -if
# BUG FIX: "$@" instead of $* — $* re-split forwarded configure arguments
# containing spaces (e.g. --with-foo="a b") into multiple words.
./configure CPPFLAGS="${CPPFLAGS} ${_common_pp} ${_optimize}" CFLAGS="${CFLAGS} ${_c_pp}" CXXFLAGS="${CXXFLAGS} ${_cxx_pp}" LDFLAGS="${LDFLAGS} ${_optimize}" \
--enable-shared --disable-static --prefix="/usr/local" "$@"
|
lhmouse/poseidon-medusa
|
reconfig_optimized_debug_cxx11.sh
|
Shell
|
lgpl-3.0
| 383 |
#!/bin/bash
# add 2016-11-22 by Pascal Withopf, released under ASL 2.0
# Tests the pmrfc3164 parser option remove.msgFirstSpace="on" by feeding four
# syslog messages through imtcp and comparing the %msg% output byte-for-byte
# against an expected transcript. All harness verbs (init, startup, tcpflood,
# shutdown-when-empty, ...) come from the rsyslog diag.sh test framework.
. $srcdir/diag.sh init
. $srcdir/diag.sh generate-conf
. $srcdir/diag.sh add-conf '
module(load="../plugins/imtcp/.libs/imtcp")
input(type="imtcp" port="13514" ruleset="customparser")
parser(name="custom.rfc3164" type="pmrfc3164" remove.msgFirstSpace="on")
template(name="outfmt" type="string" string="-%msg%-\n")
ruleset(name="customparser" parser="custom.rfc3164") {
:syslogtag, contains, "tag" action(type="omfile" template="outfmt" file="rsyslog.out.log")
}
'
. $srcdir/diag.sh startup
# Inject probe messages: tag followed by a space (1, 2), no space (3), and a
# bare tag with no message at all (4).
. $srcdir/diag.sh tcpflood -m1 -M "\"<129>Mar 10 01:00:00 172.20.245.8 tag: msgnum:1\""
. $srcdir/diag.sh tcpflood -m1 -M "\"<129>Mar 10 01:00:00 172.20.245.8 tag: msgnum:2\""
. $srcdir/diag.sh tcpflood -m1 -M "\"<129>Mar 10 01:00:00 172.20.245.8 tag:msgnum:3\""
. $srcdir/diag.sh tcpflood -m1 -M "\"<129>Mar 10 01:00:00 172.20.245.8 tag4:\""
. $srcdir/diag.sh shutdown-when-empty
. $srcdir/diag.sh wait-shutdown
# Compare the whole log against the expected transcript; cmp exits non-zero
# on any difference. (NOTE(review): msgnum:2 is expected WITH a leading space
# although its input equals msgnum:1's — confirm against upstream intent.)
echo '-msgnum:1-
- msgnum:2-
-msgnum:3-
--' | cmp rsyslog.out.log
if [ ! $? -eq 0 ]; then
echo "invalid response generated, rsyslog.out.log is:"
cat rsyslog.out.log
. $srcdir/diag.sh error-exit 1
fi;
. $srcdir/diag.sh exit
|
shane-lawrence/rsyslog
|
tests/pmrfc3164-msgFirstSpace.sh
|
Shell
|
lgpl-3.0
| 1,208 |
#!/usr/bin/env sh
# Smoke-test driver for a local dbcAmplicons build: runs each pipeline stage
# (validate, preprocess, join, classify, abundance, screen) appending output
# to test_output.txt, then diffs against the expected transcript
# test_validate.txt — a non-empty diff signals a regression.
echo '' > test_output.txt
## test validation
echo "Testing dbcAmplicons validate, these should fail"
python ../bin/dbcAmplicons validate -B barcodeLookupTable.txt -P primerLookupTable-bd.txt -S sampleLookupTable-bd.txt >> test_output.txt
python ../bin/dbcAmplicons validate -B barcodeLookupTable.txt -S sampleLookupTable-np.txt >> test_output.txt
## testing preprocess
echo "Testing dbcAmplicons preprocess"
python ../bin/dbcAmplicons preprocess -b 15001 -q 10 -l 200 -S sampleLookupTable.txt -B barcodeLookupTable.txt \
-P primerLookupTable-dedup.txt -f 6 \
-1 Amplicon_Raw_fastq/Test100K_16S_R1_001.fastq.gz Amplicon_Raw_fastq/test40k_R1_001.fastq.gz \
-O preprocess/trimL --debug >> test_output.txt
#echo "Testing dbcAmplicons splitreads"
#python ../bin/dbcAmplicons splitreads -b 15001 -S sampleLookupTable.txt -1 preprocess/trimL_R1.fastq.gz -O splitreads --debug
echo "Testing dbcAmplicons join"
python ../bin/dbcAmplicons join -t 4 -x 0.25 -1 preprocess/trimL/match_twoprimer_R1.fastq.gz -O join/match_twoprimer >> test_output.txt
echo "Testing dbcAmplicons classify"
# NOTE(review): --rdpPath is a hard-coded developer-machine path; this stage
# only works where that RDP classifier jar exists.
python ../bin/dbcAmplicons classify --rdpPath /Users/pmh/Documents/Bioinformatics/programs/RDPTools/classifier.jar -b 7500 -q 10 -l 200 -O join/classify -U join/match_twoprimer.extendedFrags.fastq.gz --debug -p 4 -1 join/match_twoprimer.notCombined_1.fastq.gz -2 join/match_twoprimer.notCombined_2.fastq.gz >> test_output.txt
echo "Testing dbcAmplicons abundance"
python ../bin/dbcAmplicons abundance -O join/abundance -F join/classify.fixrank --debug >> test_output.txt
echo "Testing dbcAmplicons abundance (biom format)"
python ../bin/dbcAmplicons abundance -O join/abundance -F join/classify.fixrank -S sampleLookupTable.txt -b --debug >> test_output.txt
echo "Testing dbcAmplicons abundance (biom format, hdf5)"
python ../bin/dbcAmplicons abundance -O join/abundance -F join/classify.fixrank -S sampleLookupTable.txt --hdf5 --debug >> test_output.txt
#echo "Testing convert2ReadTo4Read"
#convert2ReadTo4Read.py -O backtest/test -1 Amplicon_Raw_fastq/Hkates_R1.40k.fastq.gz >> test_output.txt
#echo "Testing dbcAmplicons preprocess post convert2ReadTo4Read back test"
#python ../bin/dbcAmplicons preprocess -B barcodeLookupTable.txt -O backtest/test2 -1 backtest/test_R1.fastq.gz >> test_output.txt
#echo "Testing splitReadsBySample paired reads"
#splitReadsBySample.py -O allSamples -1 Amplicon_Raw_fastq/Hkates_R1.40k.fastq.gz -2 Amplicon_Raw_fastq/Hkates_R2.40k.fastq.gz --debug >> test_output.txt
#echo "Testing splitReadsBySample single reads"
#splitReadsBySample.py -O allSamples -U Amplicon_Raw_fastq/Hkates_R1.40k.fastq.gz --debug >> test_output.txt
echo "Testing screening"
python ../bin/dbcAmplicons screen -R Amplicon_Raw_fastq/test_map.fa -1 Amplicon_Raw_fastq/Hkates_R1.40k.fastq.gz -2 Amplicon_Raw_fastq/Hkates_R2.40k.fastq.gz -U Amplicon_Raw_fastq/Hkates_R1.40k.fastq.gz --debug -s 2 -o >> test_output.txt
#echo "Removing folders and files"
#rm -rf allSamples backtest join preprocess splitreads out.txt
# Final check: any deviation from the expected transcript fails the script.
diff test_validate.txt test_output.txt
|
msettles/dbcAmplicons
|
tests/test_dbcAmplicons_for_local_build.sh
|
Shell
|
lgpl-3.0
| 3,086 |
#!/bin/sh
# Emit the SQL*Plus command sequence for running t1.sql with the caller's
# first argument, suitable for piping into sqlplus.
echo connect / as sysdba
# Quote "$1" so a value containing spaces or glob characters is passed
# through verbatim instead of being word-split/expanded by the shell.
echo "@t1.sql $1"
echo
|
DarthMaulware/EquationGroupLeaks
|
Leak #4 - Don't Forget Your Base/EQGRP-Auction-File/Linux/etc/oracle/mkt0sql.sh
|
Shell
|
unlicense
| 46 |
#!/bin/bash
# Copy a torrent file from the local ~/Torrents to a remote host's ~/Torrents
# over scp, authenticating non-interactively via sshpass.
#param1 hostname
#param2 ssh user
#param3 ssh pass
#param4 torrentfile name
# Quote every positional: hostnames, users and filenames may contain
# shell-special characters. NOTE(review): the password is still visible in
# `ps` output via sshpass -p — consider SSHPASS env or key auth.
sshpass -p "$3" scp -o StrictHostKeyChecking=no ~/Torrents/"$4" "$2@$1":~/Torrents
echo "Copied $4 to $1:~/Torrents"
|
cvswarrior/vmdistribution
|
hu.bme.mit.vmdistribution.app/scrpits/copy_torrent_from_seeder_to_leecher.sh
|
Shell
|
unlicense
| 202 |
#!/bin/bash
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
#
set -e
#-----------------------------------------------------------------------------------------
# Create a user and its group on the current platform.
# Arguments (forwarded unchanged): $1 user, $2 group, $3 password, $4 uid, $5 gid.
# Exits 1 when the detected OS has no implementation yet (only Debian so far).
function lib_user_create ()
{
    local v_all_args="$@"
    lib_os_get_platform v_os
    local v_done=false
    # [[ ]] does not word-split its operands, so an empty ${v_os} can no
    # longer produce a syntax error the way the old unquoted [ ... ] could.
    if [[ "${v_done}" == false && "${v_os}" == "$LIB_OS_PLATFORM_DEBIAN" ]];
    then
        # no quotes around all args !
        lib_user_create_debian ${v_all_args}
        v_done=true
    fi
    # if [ ${v_done} == false ] && [ ${v_os} == $LIB_OS_PLATFORM_RHEL];
    # then
    #     ...
    #     v_done=true
    # fi
    #
    # if [ ${v_done} == false ] && [ ${v_os} == $LIB_OS_PLATFORM_SLES];
    # then
    #     ...
    #     v_done=true
    # fi
    if [[ "${v_done}" == false ]];
    then
        lib_log_error "lib_user_create : Support for the current OS not implemented yet."
        exit 1
    fi
}
#-----------------------------------------------------------------------------------------
# Create a Debian user and its group.
# Arguments: $1 user, $2 group, $3 password, $4 uid, $5 gid.
# NOTE(review): the `local v_gid` declared here is also read by
# lib_user_create_user_in_group_debian via bash dynamic scoping (that
# function references ${v_gid} without defining it) — do not rename it.
function lib_user_create_debian ()
{
local v_user="$1"
local v_group="$2"
local v_password="$3"
local v_uid="$4"
local v_gid="$5"
lib_validate_var_is_set v_user "Invalid argument 'user'."
lib_validate_var_is_set v_group "Invalid argument 'group'."
lib_validate_var_is_set v_password "Invalid argument 'password'."
lib_validate_var_is_set v_uid "Invalid argument 'uid'."
lib_validate_var_is_set v_gid "Invalid argument 'gid'."
# Group first, then the user inside it.
lib_user_create_group_debian "${v_group}" "${v_gid}"
lib_user_create_user_in_group_debian "${v_user}" "${v_group}" "${v_password}" "${v_uid}"
}
#-----------------------------------------------------------------------------------------
# Ensure a group exists on a Debian system, creating it with addgroup when
# /etc/group has no entry for it yet.
# Arguments: $1 group name, $2 numeric gid.
function lib_user_create_group_debian ()
{
    local v_grp="$1"
    local v_grp_id="$2"
    lib_validate_var_is_set v_grp "Invalid argument 'group'."
    lib_validate_var_is_set v_grp_id "Invalid argument 'gid'."
    lib_log_debug "group : ${v_grp}"
    lib_log_debug "gid : ${v_grp_id}"
    lib_log_info "... look if group '${v_grp}' exists already"
    # Keep the assignment on the `local` line: grep -c exits non-zero when
    # the count is 0, and `local` masks that status so set -e does not abort.
    local v_hits=$(grep -c "^${v_grp}:" /etc/group)
    lib_log_debug "... result of check = [${v_hits}]"
    if [ "${v_hits}" != "0" ];
    then
        lib_log_info "... group '${v_grp}' already exists - check gid"
    else
        lib_log_info "... group '${v_grp}' do not exists - will be created new"
        lib_exec "addgroup --quiet --gid ${v_grp_id} ${v_grp}"
    fi
}
#-----------------------------------------------------------------------------------------
# Ensure a user exists on a Debian system and (re)set its password.
# Arguments: $1 user, $2 group, $3 password, $4 uid, $5 gid (optional).
# BUG FIX: the adduser call below used ${v_gid}, which this function never
# defined — it only worked when a caller (lib_user_create_debian) happened to
# hold a local v_gid visible through bash dynamic scoping. The gid is now an
# explicit optional 5th argument that falls back to that dynamic value, so
# existing 4-argument call sites keep working.
function lib_user_create_user_in_group_debian ()
{
    local v_user="$1"
    local v_group="$2"
    local v_password="$3"
    local v_uid="$4"
    # `local x="${5:-$x}"` captures the caller-scoped value before shadowing it.
    local v_gid="${5:-${v_gid:-}}"
    lib_validate_var_is_set v_user "Invalid argument 'user'."
    lib_validate_var_is_set v_group "Invalid argument 'group'."
    lib_validate_var_is_set v_password "Invalid argument 'password'."
    lib_validate_var_is_set v_uid "Invalid argument 'uid'."
    lib_validate_var_is_set v_gid "Invalid argument 'gid'."
    lib_log_debug "user : ${v_user}"
    lib_log_debug "group : ${v_group}"
    lib_log_debug "password : ${v_password}"
    lib_log_debug "uid : ${v_uid}"
    lib_log_info "... look if user '${v_user}' exists already"
    # grep -c exits non-zero on zero matches; keep the assignment on the
    # `local` line so set -e does not abort here.
    local v_user_check=$(grep -c "^${v_user}:" /etc/passwd)
    lib_log_debug "... result of check = [${v_user_check}]"
    if [ "${v_user_check}" == "0" ];
    then
        lib_log_info "... user '${v_user}' do not exists - will be created new"
        lib_exec "adduser --quiet --no-create-home --disabled-login --gecos \"\" --uid ${v_uid} --gid ${v_gid} ${v_user}"
    else
        lib_log_info "... user '${v_user}' already exists - check uid"
    fi
    lib_log_info "... (re)define password for user '${v_user}'"
    lib_exec "echo -e \"${v_password}\n${v_password}\" | passwd ${v_user}"
}
|
andreas-schluens-asdev/asdk
|
sdt/src/main/resources/net/as_development/asdk/sdt/res/lib/lib_user.sh
|
Shell
|
unlicense
| 5,041 |
#!/bin/bash
#author: xuzhigui
#version: v1.0
#email: [email protected]
set -e
SCRIPT=$(readlink -f $0)
SCRIPTPATH=$(dirname $SCRIPT)
#define variables
SOFTDIR='/tmp/softenv'
CURDIR=$(cd "$(dirname "$0")"; pwd)
#check if the user is root
echo -e '\E[31;31m\n====== check if the user is root ======';tput sgr0
if [ $(id -u) != "0" ]; then
echo "Error: You must be root to run this script, please use root to install lnmp"
exit 1
fi
#begin master process to install
mkdir -p $SOFTDIR
function install_nginx {
#change to software dir
cd $SOFTDIR
#prepare for installing nginx
echo -e '\E[31;31m\n====== prepare for installing nginx ======';tput sgr0
yum -y install pcre.x86_64 pcre-devel.x86_64
yum -y install zlib.x86_64 zlib-devel.x86_64
yum -y install gd.x86_64 gd-devel.x86_64
yum -y install openssl*
#adding www user
# echo -e '\E[31;31m\n====== checking if user www exist and add ======';tput sgr0
# if [ $(id www | wc -l) == '0' ];then
# /usr/sbin/useradd -M www
# fi
echo -e '\E[31;31m\n====== checking if nginx tar file exist and download ======';tput sgr0
if [ ! -e "nginx-1.6.0.tar.gz" ];then
wget -c http://nginx.org/download/nginx-1.6.0.tar.gz
fi
/bin/rm -rf nginx-1.6.0 && tar xf nginx-1.6.0.tar.gz
echo -e '\E[31;31m\n====== downloading nginx rtmp module and ngx_pagespeed module ======';tput sgr0
cd nginx-1.6.0 && git clone https://git.oschina.net/xiangyu123/nginx-rtmp-module.git
#installing nginx
echo -e '\E[31;31m\n====== compiling nginx with rtmp module and ngx_pagespeed and install ======';tput sgr0
./configure --prefix=/usr/local/nginx --add-module=./nginx-rtmp-module/ --with-http_flv_module --with-http_mp4_module --with-http_realip_module --with-http_sub_module --with-http_gunzip_module --with-http_gzip_static_module --with-http_stub_status_module --with-pcre --with-http_addition_module --with-http_ssl_module --with-http_realip_module --with-pcre --with-http_addition_module --with-http_image_filter_module --with-debug && make && make install
echo '<?php phpinfo(); ?>' > /usr/local/nginx/html/index.php
chown -R nobody.nobody /usr/local/nginx/html/
if [ ! -d "/usr/local/nginx/cache" ];then
mkdir -p /usr/local/nginx/cache
fi
/bin/rm -rf /etc/sysconfig/nginx && cp $CURDIR/nginx.sysconfig /etc/sysconfig/nginx
/bin/rm -rf /etc/init.d/nginx
cp $CURDIR/nginx.service /etc/init.d/nginx
chmod 755 /etc/init.d/nginx && chkconfig --add nginx && chkconfig nginx on
}
function install_epel {
#installing epel yum repo
echo -e '\E[31;31m\n====== installing epel yum repo ======';tput sgr0
set +e
rpm -ivh http://mirrors.ustc.edu.cn/fedora/epel/6/i386/epel-release-6-8.noarch.rpm
yum clean all
yum makecache
set -e
}
function install_imagick_module {
cp $CURDIR/ImageMagick.tar.gz $SOFTDIR/
cp $CURDIR/jpegsrc.v9.tar.gz $SOFTDIR/
cp $CURDIR/imagick-3.1.0RC1.tgz $SOFTDIR/
echo -e '\E[31;31m\n====== compiling jpeg9 and install ======';tput sgr0
cd $SOFTDIR && tar xf jpegsrc.v9.tar.gz && cd jpeg-9
./configure && make libdir=/usr/lib64 && make libdir=/usr/lib64 install
echo -e '\E[31;31m\n====== compiling imagick and install ======';tput sgr0
cd $SOFTDIR && tar xf ImageMagick.tar.gz && cd ImageMagick-6.9.0-4
./configure && make && make install
ln -s /usr/local/include/ImageMagick-6 /usr/local/include/ImageMagick
echo -e '\E[31;31m\n====== compiling imagick module and install ======';tput sgr0
cd $SOFTDIR && tar xf imagick-3.1.0RC1.tgz && cd imagick-3.1.0RC1
/usr/local/php/bin/phpize
./configure --with-php-config=/usr/local/php/bin/php-config && make && make install
}
function install_sphinx_module {
cp $CURDIR/coreseek-3.2.14.tar.gz $SOFTDIR/
cp $CURDIR/sphinx-1.2.0.tgz $SOFTDIR/
echo -e '\E[31;31m\n====== compiling coreseek and install ======';tput sgr0
cd $SOFTDIR && tar xf coreseek-3.2.14.tar.gz && cd coreseek-3.2.14/csft-3.2.14/api/libsphinxclient/
./configure --prefix=/usr/local/sphinxclient && make && make install
echo -e '\E[31;31m\n====== compiling coreseek and install ======';tput sgr0
cd $SOFTDIR && tar xf sphinx-1.2.0.tgz && cd sphinx-1.2.0
/usr/local/php/bin/phpize
./configure --with-php-config=/usr/local/php/bin/php-config --with-sphinx=/usr/local/sphinxclient && make && make install
}
function install_php5 {
#change to software dir
cd $SOFTDIR
#prepare for installing php5
echo -e '\E[31;31m\n====== prepare for installing php5 ======';tput sgr0
yum -y install libxml2* openssl* libjpeg* libpng* zlib* libcurl* freetype*
#exec install_epel function
# install_epel
#installing libmcrypt
yum -y install libmcrypt-devel.x86_64 libmcrypt.x86_64
#adding www user
echo -e '\E[31;31m\n====== checking if user www exist and add ======';tput sgr0
if [ $(id www | wc -l) == '0' ];then
/usr/sbin/useradd -M www
fi
echo -e '\E[31;31m\n====== checking if php5 tar file exist and download ======';tput sgr0
if [ ! -e "php-5.5.14.tar.gz" ];then
wget -c http://cn2.php.net/distributions/php-5.5.14.tar.gz
fi
/bin/rm -rf php-5.5.14 && tar xf php-5.5.14.tar.gz && cd php-5.5.14
#compiling php5 and install
echo -e '\E[31;31m\n====== compiling php5 and install ======';tput sgr0
./configure --prefix=/usr/local/php --with-config-file-path=/etc --with-mysql=/usr/local/mysql --with-mysqli=/usr/local/mysql/bin/mysql_config --with-iconv-dir=/usr/local --with-freetype-dir --with-jpeg-dir --with-png-dir --with-zlib --with-libxml-dir=/usr --enable-xml --disable-rpath --enable-bcmath --enable-shmop --enable-sysvsem --enable-inline-optimization --with-curl --enable-mbregex --enable-fpm --enable-mbstring --with-mcrypt=/usr/local/libmcrypt --with-gd --enable-gd-native-ttf --with-openssl --with-mhash --enable-pcntl --enable-sockets --with-xmlrpc --enable-zip --enable-soap --without-pear --with-zlib --enable-pdo --with-pdo-mysql --with-mysql --enable-opcache && make && make install
}
function config_php_start_nginx_php {
#change to software dir
cd $SOFTDIR
#build conf and add chkconfig
echo -e '\E[31;31m\n====== build conf and add chkconfig ======';tput sgr0
cp $CURDIR/php.ini /etc/php.ini
mkdir -p /usr/local/php/log
touch /usr/local/php/log/www.log.slow
chmod 777 /usr/local/php/log/www.log.slow
cp $CURDIR/php-fpm.service /etc/init.d/php-fpm && chmod 755 /etc/init.d/php-fpm
chmod 755 /etc/init.d/php-fpm && chkconfig --add php-fpm && chkconfig php-fpm on
cp $CURDIR/php-fpm.conf /usr/local/php/etc/php-fpm.conf
cp $CURDIR/fpm.d.tar.gz /usr/local/php/etc/ && cd /usr/local/php/etc/ && tar xf fpm.d.tar.gz && rm -rf fpm.d.tar.gz
cp $CURDIR/nginx_php.conf /usr/local/nginx/conf/nginx.conf
IP=$(ifconfig em1| grep 'inet addr' | awk -F '.' '{print $4}' | awk '{print $1}')
sed -i -e s/Web30/Web$IP/g /usr/local/nginx/conf/nginx.conf
cp $CURDIR/Configs.tar.gz /usr/local/nginx/conf/ && cd /usr/local/nginx/conf/ && tar xf Configs.tar.gz && rm -rf Configs.tar.gz
cp $CURDIR/cut_nginx_log.sh /usr/local/bin/
echo '00 00 * * * /usr/local/bin/cut_nginx_log.sh' >> /var/spool/cron/root
service crond restart
#starting php-fpm
echo -e '\E[31;31m\n====== starting php-fpm ======';tput sgr0
service php-fpm start || service php-fpm restart
#starting nginx web server
echo -e '\E[31;31m\n====== starting nginx web server ======';tput sgr0
service nginx start || /usr/local/nginx/sbin/nginx
}
function install_mysqld {
#change to software dir
cd $SOFTDIR
#prepare for installing nginx
echo -e '\E[31;31m\n====== prepare for installing mysqld ======';tput sgr0
yum -y install cmake.x86_64 ncurses.x86_64 ncurses-devel.x86_64
#adding mysql user
echo -e '\E[31;31m\n====== checking if user mysql exist and add user ======';tput sgr0
if [ $(id mysql | wc -l) == '0' ];then
/usr/sbin/useradd -M mysql
fi
#check if "data/mysql" exist
echo -e '\E[31;31m\n====== checking if /data/mysql exist and create directory ======';tput sgr0
if [ ! -d "/data/mysql" ];then
mkdir -p /data/mysql
fi
chown -R mysql.mysql /data/mysql
#check if mysqld tar file exist and download
echo -e '\E[31;31m\n====== checking if mysqld tar file exist and download ======';tput sgr0
if [ ! -e "mysql-5.6.19.tar.gz" ];then
wget -c http://dev.mysql.com/get/Downloads/MySQL-5.6/mysql-5.6.19.tar.gz
fi
/bin/rm -rf mysql-5.6.19 && tar xf mysql-5.6.19.tar.gz && cd mysql-5.6.19
#compiling mysqld and install
echo -e '\E[31;31m\n====== compiling mysqld and install ======';tput sgr0
/usr/bin/cmake . -DCMAKE_INSTALL_PREFIX=/usr/local/mysql -DMYSQL_DATADIR=/data/mysql -DSYSCONFDIR=/etc && make && make install
#build conf and add chkconfig
echo -e '\E[31;31m\n====== build conf and add chkconfig ======';tput sgr0
cp support-files/mysql.server /etc/init.d/mysqld
chmod 755 /etc/init.d/mysqld && chkconfig --add mysqld && chkconfig mysqld on
cp $CURDIR/my.cnf /etc/my.cnf
#init mysql database
echo -e '\E[31;31m\n====== init mysql to /data/mysql ======';tput sgr0
/usr/local/mysql/scripts/mysql_install_db --user=mysql --datadir=/data/mysql --basedir=/usr/local/mysql/
#check if /etc/ld.so.conf.d/mysql exist
if [ ! -e '/etc/ld.so.conf.d/mysql.conf' ];then
echo '/usr/local/mysql/lib' > /etc/ld.so.conf.d/mysql.conf && ldconfig
fi
}
function install_phpredis {
#change to software dir
cd $SOFTDIR
#check if phpredis exists and download
echo -e '\E[31;31m\n====== check if phpredis exist and download ======';tput sgr0
if [ -e "phpredis" ];then
rm -rf phpredis
fi
echo -e '\E[31;31m\n====== downloading phpredis ======';tput sgr0
git clone https://github.com/owlient/phpredis
cd phpredis && /usr/local/php/bin/phpize
#compiling phpredis extension
echo -e '\E[31;31m\n====== compiling and install phpredis ======';tput sgr0
./configure --with-php-config=/usr/local/php/bin/php-config --enable-redis && make && make install
}
function install_nodejs {
#change to software dir
cd $SOFTDIR
#installing nodejs and npm
echo -e '\E[31;31m\n====== installing nodejs and npm ======';tput sgr0
yum -y install nodejs.x86_64 nodejs-devel.x86_64 npm.noarch
#installing pomelo freamwork
echo -e '\E[31;31m\n====== installing pomelo freamwork ======';tput sgr0
npm install pomelo -g
#installing pomelo freamwork
echo -e '\E[31;31m\n====== installing nodejs forever ======';tput sgr0
npm install forever -g
}
# Download, build and install redis 2.8.17 from source, then set up two
# instances: the default 6379 server and a password-protected 6380 server,
# both registered as chkconfig services.
# Relies on $SOFTDIR and $SCRIPTPATH being set earlier in this script.
function install_redis_server {
#change to software dir
cd $SOFTDIR
#check if redis_server packages exist and download
echo -e '\E[31;31m\n====== check if redis_server packages exist and download ======';tput sgr0
if [ ! -e "redis-2.8.17.tar.gz" ];then
wget -c http://download.redis.io/releases/redis-2.8.17.tar.gz
fi
/bin/rm -rf redis-2.8.17 && tar xf redis-2.8.17.tar.gz && cd redis-2.8.17
#compiling redis server and install
echo -e '\E[31;31m\n====== compiling redis server but not install ======';tput sgr0
make && make install
cd utils
# Make install_server.sh non-interactive by commenting out its prompts,
# and strip the per-port suffix from the service name.
# NOTE: the original ran the first sed twice; the duplicate rewrote the
# already-commented lines to "##read -p" — redundant, so it was removed.
sed -i 's/read -p/#read -p/g' install_server.sh
sed -i 's/redis_${REDIS_PORT}/redis/g' install_server.sh
sed -i 's/redis_$REDIS_PORT/redis/g' install_server.sh
#installing redis server on production system
echo -e '\E[31;31m\n====== installing redis default 6379 server ======';tput sgr0
./install_server.sh
# Raise the listen backlog from the stock 511.
sed -i 's/511/2048/g' /etc/redis/6379.conf
#service redis restart
cp $SCRIPTPATH/redis_6379 /etc/init.d/
cp $SCRIPTPATH/redis_6380 /etc/init.d/
chkconfig --add redis_6379
chkconfig --add redis_6380
chkconfig redis_6379 on
chkconfig redis_6380 on
service redis_6379 restart
#configure 6380 server
echo -e '\E[31;31m\n====== configure redis 6380 server ======';tput sgr0
if [ ! -f "/etc/redis/6380.conf" ];then
cp /etc/redis/6379.conf /etc/redis/6380.conf
echo 'requirepass haibian1qazxsw2' >> /etc/redis/6380.conf
echo 'masterauth haibian1qazxsw2' >> /etc/redis/6380.conf
fi
#check if dir /var/lib/redis/6380 and create
echo -e '\E[31;31m\n====== make sure the dir /var/lib/redis/6380 exist ======';tput sgr0
if [ ! -d "/var/lib/redis/6380" ];then
mkdir -p /var/lib/redis/6380
fi
#configure the file /etc/redis/6380.conf
echo -e '\E[31;31m\n====== configure the file /etc/redis/6380.conf ======';tput sgr0
sed -i 's/redis.pid/redis_6380.pid/g' /etc/redis/6380.conf
sed -i 's/port 6379/port 6380/g' /etc/redis/6380.conf
sed -i 's/511/2048/g' /etc/redis/6380.conf
sed -i 's/redis.log/redis_6380.log/g' /etc/redis/6380.conf
sed -i 's#redis/6379#redis/6380#g' /etc/redis/6380.conf
#start 6380 redis server
echo -e '\E[31;31m\n====== starting redis 6380 server ======';tput sgr0
# /usr/local/bin/redis-server /etc/redis/6380.conf
service redis_6380 start
#check the server port
echo -e '\E[31;31m\n====== check the server port ======';tput sgr0
netstat -antup | grep redis
}
# Top-level driver: run the installers in dependency order (nginx/mysql/php
# first, php extensions next, redis before nodejs/pomelo which use it, and
# finally wire up php-fpm + nginx). All functions are defined above.
install_nginx
install_mysqld
install_php5
install_imagick_module
install_sphinx_module
install_phpredis
install_redis_server
install_nodejs
config_php_start_nginx_php
echo -e '\E[31;31m\n====== Congratulations!! All have finished !! ======';tput sgr0
|
xiangyu123/centos6.5_init
|
softinstall.sh
|
Shell
|
apache-2.0
| 13,346 |
# Run the fixk/neutral-threshold active-learning experiment for the AVIATION
# dataset over a sweep of neutral thresholds.
#   $1 - the "fixk" value, used both as a CLI flag and in the log file name.
fixk=$1
for i in 0.3 0.35 0.4 0.45
do
    out="../results/fixk-neutral/AVIATION-FIX-${fixk}-TH${i}-NEUTRAL.TXT"
    # Send stdout and stderr to the same log via 2>&1. The original used
    # `> f 2> f`, which opens (and truncates) the file twice, so the two
    # streams clobbered each other's output.
    python ./experiment/traintestfixk.py --train "aviation" --fixk ${fixk} --neutral_threshold ${i} --trials 5 --budget 3000 --step-size 10 --bootstrap 50 --maxiter 300 > "${out}" 2>&1
done
|
mramire8/active
|
scripts/neu_fixk.sh
|
Shell
|
apache-2.0
| 351 |
#!/bin/bash
# Provision a development VM (Ubuntu, run under vagrant): base packages,
# liblmdb, lua rocks, node/coffee-script, rbenv+ruby, Sencha Cmd, and nginx.
# The bare `: text` lines are no-op commands used as section banners — with
# `set -x` enabled they are echoed to the trace output.
set -x
set -e
: Set locale:
: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
sudo locale-gen UTF-8 || :
: Installing packages:
: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
sudo apt-get update -y
sudo apt-get install -y curl python-software-properties python g++ make unzip lua5.1 luarocks wget software-properties-common openjdk-6-jre nginx git autoconf bison build-essential libssl-dev libyaml-dev libreadline6-dev zlib1g-dev libncurses5-dev libffi-dev libgdbm3 libgdbm-dev
# Build liblmdb from source only if it is not already installed.
if [ ! -f /usr/local/lib/liblmdb.so ]
then
: Installing Lightningmdb:
: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
mkdir -p ~/installs
pushd ~/installs
git clone https://gitorious.org/mdb/mdb.git
cd mdb/libraries/liblmdb/
make
sudo make install
sudo ldconfig # we need to rebuild the cache to have libmdb.so discoverable
popd
fi;
: Installing rocks:
: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
sudo luarocks install copas 1.1.6-1
sudo luarocks install lpack
sudo luarocks install luaposix
sudo luarocks install bitlib
sudo luarocks install lightningmdb
# Node + coffee-script, only when npm is not yet on PATH.
if ! which npm
then
: Installing nodejs, coffee-script:
: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
sudo add-apt-repository ppa:chris-lea/node.js -y
sudo apt-get update -y
sudo apt-get install -y nodejs
sudo npm install -g [email protected]
fi
# rbenv + ruby-build, installed once; PATH/init wired into ~/.bash_profile.
if [ ! -e ~/.rbenv ]
then
git clone https://github.com/sstephenson/rbenv.git ~/.rbenv
echo 'export PATH="$HOME/.rbenv/bin:$PATH"' >> ~/.bash_profile
echo 'eval "$(rbenv init -)"' >> ~/.bash_profile
mkdir -p ~/ruby-build
pushd ~/ruby-build
git clone https://github.com/sstephenson/ruby-build.git .
sudo ./install.sh
rbenv install 1.9.3-p551
rbenv global 1.9.3-p551
popd
source ~/.bash_profile
fi
if ! which sencha
then
: Installing Sencha cmd:
: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
pushd /tmp
rm -f SenchaCmd-3.1.2.342-linux-x64.run.zip SenchaCmd-3.1.2.342-linux-x64.run
wget http://cdn.sencha.com/cmd/3.1.2.342/SenchaCmd-3.1.2.342-linux-x64.run.zip
unzip SenchaCmd-3.1.2.342-linux-x64.run.zip
chmod +x SenchaCmd-3.1.2.342-linux-x64.run
bash -l -c "/tmp/SenchaCmd-3.1.2.342-linux-x64.run --mode unattended"
popd
fi
: Setting up Nginx
: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Symlink the project's nginx config into sites-enabled (idempotent).
source_nginx_conf="/vagrant/mule_nginx.conf"
target_nginx_conf="/etc/nginx/sites-enabled/mule_nginx.conf"
if ! [ -L $target_nginx_conf ]
then
sudo ln -s $source_nginx_conf $target_nginx_conf
fi
sudo service nginx restart
|
shmul/mule
|
vagrant/setup_dev.sh
|
Shell
|
apache-2.0
| 2,627 |
#!/bin/sh
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
# Deprecated compatibility shim: forwards every argument to the renamed
# vdsgetclusterstate binary.
echo "WARNING: This binary has been renamed from vdsgetsystemstate to "
echo "         vdsgetclusterstate. Currently, this script calls the other. "
echo "         as a convenience. This script will be removed later."
# exec replaces this shell, so the caller sees vdsgetclusterstate's exit
# status directly; "$@" preserves argument word boundaries (the original
# unquoted $@ re-split arguments containing whitespace).
exec vdsgetclusterstate "$@"
|
vespa-engine/vespa
|
vespaclient/bin/vdsgetsystemstate.sh
|
Shell
|
apache-2.0
| 356 |
#!/bin/sh
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Rebuild the host GCC toolchain binaries from sources.
#
# NOTE: this script does not rebuild gdb, see build-host-gdb.sh for this.
#
# include common function and variable definitions
# (provides register_var_option, extract_parameters, fail_panic, run, log,
# dump, pack_archive and $DEFAULT_LLVM_VERSION — all used below)
. `dirname $0`/prebuilt-common.sh
PROGRAM_PARAMETERS=""
PROGRAM_DESCRIPTION="\
This program is used to deploy mclinker (ld.mcld) to GCC directories.
Although ld.mcld depends on lots of LLVM modules and is built in
build-llvm.sh to reduce long LLVM compilation time, it can be used as
a drop-in replacement for ld.bfd and ld.gold in GCC.
Running after completion of both build-llvm.sh and build-[host-]gcc.sh,
this script copy toolchains/llvm-$DEFAULT_LLVM_VERSION/prebuilt/$SYSTEM/bin/ld.mcld[.exe]
to be sibling of ld in all GCC directories with same HOST_OS and bitness,
ie. {linux, darwin, windows} x {64, 32}
If --systems isn't specified, this script discovers all ld.mcld[.exe] in
toolchains/llvm-$DEFAULT_LLVM_VERSION
Note that one copy of ld.mcld serves all GCC {4.7, 4.6, 4.4.3} x {arm, x86, mips}.
GCC passes -m flag for ld.mcld to figure out the right target.
"
NDK_DIR=
register_var_option "--ndk-dir=<path>" NDK_DIR "NDK installation directory"
PACKAGE_DIR=
register_var_option "--package-dir=<path>" PACKAGE_DIR "Create archive tarball in specific directory"
SYSTEMS=
register_var_option "--systems=<list>" SYSTEMS "List of host systems to deply ld.mcld for"
extract_parameters "$@"
if [ -z "$NDK_DIR" ] ; then
    NDK_DIR=$ANDROID_NDK_ROOT
    log "Auto-config: --ndk-dir=$NDK_DIR"
else
    if [ ! -d "$NDK_DIR" ] ; then
        echo "ERROR: NDK directory does not exists: $NDK_DIR"
        exit 1
    fi
fi
if [ "$PACKAGE_DIR" ]; then
    mkdir -p "$PACKAGE_DIR"
    fail_panic "Could not create package directory: $PACKAGE_DIR"
fi
cd $NDK_DIR
if [ -z "$SYSTEMS" ]; then
    # find all ld.mcld
    ALL_MCLDS=`find toolchains/llvm-$DEFAULT_LLVM_VERSION -name "ld.mcld*"`
    for MCLD in $ALL_MCLDS; do
        # compute SYSTEM of this ld.mcld
        # e.g. .../prebuilt/linux-x86_64/bin/ld.mcld -> linux-x86_64,
        # by stripping the /bin/* tail then the *prebuilt/ head.
        SYSTEM=${MCLD%%/bin/*}
        SYSTEM=${SYSTEM##*prebuilt/}
        SYSTEMS=$SYSTEMS" $SYSTEM"
    done
fi
for SYSTEM in $SYSTEMS; do
    # Windows hosts need the .exe suffix on every binary name.
    HOST_EXE=
    if [ "$SYSTEM" != "${SYSTEM%%windows*}" ] ; then
        HOST_EXE=.exe
    fi
    MCLD=toolchains/llvm-$DEFAULT_LLVM_VERSION/prebuilt/$SYSTEM/bin/ld.mcld$HOST_EXE
    test -f "$MCLD" || fail_panic "Could not find $MCLD"
    # find all GNU ld with the same SYSTEM
    ALL_LDS=`find toolchains \( -name "*-ld" -o -name "ld" -o -name "*-ld.exe" -o -name "ld.exe" \) | grep $SYSTEM/`
    ALL_LD_MCLDS=
    for LD in $ALL_LDS; do
        # Place ld.mcld next to each GNU ld, e.g. arm-...-ld -> arm-...-ld.mcld
        LD_NOEXE=${LD%%.exe}
        LD_MCLD=${LD_NOEXE}.mcld$HOST_EXE
        run rm -f "$LD_MCLD"
        run cp -a "$MCLD" "$LD_MCLD"
        ALL_LD_MCLDS=$ALL_LD_MCLDS" $LD_MCLD"
    done
    # package
    if [ "$PACKAGE_DIR" ]; then
        ARCHIVE="ld.mcld-$SYSTEM.tar.bz2"
        #echo $ARCHIVE
        echo  "Packaging $ARCHIVE"
        pack_archive "$PACKAGE_DIR/$ARCHIVE" "$NDK_DIR" $ALL_LD_MCLDS
    fi
done
dump "Done."
|
yongjhih/android_tools
|
ndk/build/tools/deploy-host-mcld.sh
|
Shell
|
apache-2.0
| 3,593 |
# Launch the MSSQL test container in the background: accepts the EULA, sets
# the SA password, and maps host port 1533 to the container's default 1433.
docker run --name ontop_mssql_running -e 'ACCEPT_EULA=Y' -e 'SA_PASSWORD=Mssql1.0' -p 1533:1433 -d ontop/ontop-mssql
|
ontop/ontop-dockertests
|
mssql/run-mssql.sh
|
Shell
|
apache-2.0
| 116 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Environment file sourced by the cassandra launcher: builds CLASSPATH,
# the -Xbootclasspath option for the fault-injection (FI) woven runtime,
# and the JVM options. It only sets variables — nothing is executed except
# the debug dump to /tmp/cassandra-fi-build-info at the end.
#######################################################################
# please modify cassandra_home to the path of your cassandra directory.
#######################################################################
#cassandra_home=`dirname $0`/..
cassandra_home=/home/jinsu/research/cass-prj/cass-fate-system-0.1
#FM_MY_JARS=/home/jinsu/research/java-rtjar
#jinsu commented out FROM
# The directory where Cassandra's configs live (required)
#CASSANDRA_CONF=$cassandra_home/conf
# This can be the path to a jar file, or a directory containing the
# compiled classes. NOTE: This isn't needed by the startup script,
# it's just used here in constructing the classpath.
#cassandra_bin=$cassandra_home/build/classes
#cassandra_bin=$cassandra_home/build/cassandra.jar #this was commented out to begin with.
# JAVA_HOME can optionally be set here
#JAVA_HOME=/usr/local/jdk6 #this was commented out to begin with.
# The java classpath (required)
#CLASSPATH=$CASSANDRA_CONF:$cassandra_bin
#for jar in $cassandra_home/lib/*.jar; do
#    CLASSPATH=$CLASSPATH:$jar
#done
#jinsu commented out TO
# adding:
# ------------------------
# 1) cassandra classes
# ------------------------
#cassandra_classes="$cassandra_home/build/classes"
#CLASSPATH=${CLASSPATH}:$cassandra_classes
# ------------------------
# 2) conf folder
# ------------------------
#CASSANDRA_CONF="$cassandra_home/conf"
#CLASSPATH=${CLASSPATH}:$CASSANDRA_CONF
# ------------------------
# 3) lib jars
# ------------------------
for jar in $cassandra_home/lib/*.jar; do
    CLASSPATH=${CLASSPATH}:$jar
done
# ------------------------
# 4) aspect stuff
# ------------------------
# NOTE(review): $FM_MY_JARS is commented out above, so these resolve to
# "/aspectj/..." unless FM_MY_JARS is exported by the caller — verify.
FM_ASPECTJ1="$FM_MY_JARS/aspectj/aspectjrt-1.6.4.jar"
FM_ASPECTJ2="$FM_MY_JARS/aspectj/aspectjtools-1.6.4.jar"
CLASSPATH=${CLASSPATH}:${FM_ASPECTJ1}:${FM_ASPECTJ2}
# ------------------------
# 5) JOL stuff
# ------------------------
FM_JOL="$FM_MY_JARS/jol/jol.jar"
CLASSPATH=${CLASSPATH}:${FM_JOL}
# ------------------------
# 6) RPC stuffs
# ------------------------
FM_RPC1="$FM_MY_JARS/apache-xmlrpc-3.1.3/lib/xmlrpc-client-3.1.3.jar"
FM_RPC2="$FM_MY_JARS/apache-xmlrpc-3.1.3/lib/xmlrpc-server-3.1.3.jar"
FM_RPC3="$FM_MY_JARS/apache-xmlrpc-3.1.3/lib/xmlrpc-common-3.1.3.jar"
FM_RPC4="$FM_MY_JARS/apache-xmlrpc-3.1.3/lib/ws-commons-util-1.0.2.jar"
FM_RPC5="$FM_MY_JARS/apache-xmlrpc-3.1.3/lib/commons-logging-1.1.jar"
CLASSPATH=${CLASSPATH}:${FM_RPC1}:${FM_RPC2}:${FM_RPC3}:${FM_RPC4}:${FM_RPC5}
# ------------------------
# 7) boot opts
# ------------------------
# NOTE(review): FM_BOOT_OPTS is overwritten by FI_BOOT_OPTS below and is
# never referenced in JVM_OPTS — looks vestigial.
FM_WOVENRT="$cassandra_home/build/woven-rt.jar"
FM_BOOT_OPTS="-Xbootclasspath:$FM_WOVENRT"
# #######################################################################
# jinsu, boot class path, add FI_BOOT_OPTS,
# #######################################################################
# -------------------------------------------------------
# please modify these three entries accordingly (see MAC and Linux configuration)
# -------------------------------------------------------
# This is a possible MAC configuration
# -------------------------------------------------------
#FI_JAVA_CLASSES_DIR=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Classes/
#FI_JSSE="$FI_JAVA_CLASSES_DIR/jsse.jar"
#FI_JCE="$FI_JAVA_CLASSES_DIR/jce.jar"
# -------------------------------------------------------
# This is a possible Linux configuration
# -------------------------------------------------------
FI_JAVA_CLASSES_DIR=/usr/lib/jvm/java-6-sun/jre/lib
FI_JSSE="$FI_JAVA_CLASSES_DIR/jsse.jar"
FI_JCE="$FI_JAVA_CLASSES_DIR/jce.jar"
# -------------------------------------------------------
# boot classs
# -------------------------------------------------------
FI_LIB_DIR="$cassandra_home/lib/fi"
FI_WOVENRT="$cassandra_home/build/woven-rt.jar"
FI_BOOT_OPTS="-Xbootclasspath:$FI_WOVENRT:$FI_JSSE:$FI_JCE"
# -------------------------------------------------------
# add extra classes stuff
# -------------------------------------------------------
FI_JOL="$FI_LIB_DIR/jol/jol.jar"
#FI_OLG="build/classes/olg.jar"
FI_RPC1="$FI_LIB_DIR/xmlrpc/xmlrpc-client-3.1.3.jar"
FI_RPC2="$FI_LIB_DIR/xmlrpc/xmlrpc-server-3.1.3.jar"
FI_RPC3="$FI_LIB_DIR/xmlrpc/xmlrpc-common-3.1.3.jar"
FI_RPC4="$FI_LIB_DIR/xmlrpc/ws-commons-util-1.0.2.jar"
FI_RPC5="$FI_LIB_DIR/xmlrpc/commons-logging-1.1.jar"
# -------------------------------------------------------
# the final classpath
# -------------------------------------------------------
CLASSPATH=${CLASSPATH}:${FI_JOL} #:${FI_OLG}
CLASSPATH=${CLASSPATH}:${FI_RPC1}:${FI_RPC2}:${FI_RPC3}:${FI_RPC4}:${FI_RPC5}
# adding:
# ------------------------
# 1) cassandra classes
# ------------------------
cassandra_classes="$cassandra_home/build/classes"
CLASSPATH=${CLASSPATH}:$cassandra_classes
# ------------------------
# 2) conf folder
# ------------------------
CASSANDRA_CONF="$cassandra_home/conf"
CLASSPATH=${CLASSPATH}:$CASSANDRA_CONF
# ------------------------
# 3) lib jars
# ------------------------
# NOTE(review): lib/*.jar was already appended above, so every lib jar
# appears twice on the classpath (harmless to the JVM, but redundant).
for jar in $cassandra_home/lib/*.jar; do
    CLASSPATH=${CLASSPATH}:$jar
done
# Debug breadcrumb of the resolved classpath/home.
touch /tmp/cassandra-fi-build-info
echo "Cassandra run classpath --> $CLASSPATH" >> /tmp/cassandra-fi-build-info
echo "Cassandra run homepath --> $cassandra_home" >> /tmp/cassandra-fi-build-info
# -------------------------------------------------------
# Arguments to pass to the JVM
JVM_OPTS=" \
        -ea \
        -Xms512M \
        -Xmx512M \
        -XX:+UseParNewGC \
        -XX:+UseConcMarkSweepGC \
        -XX:+CMSParallelRemarkEnabled \
        -XX:SurvivorRatio=8 \
        -XX:MaxTenuringThreshold=1 \
        -XX:+HeapDumpOnOutOfMemoryError \
        -Dcom.sun.management.jmxremote.port=8080 \
        -Dcom.sun.management.jmxremote.ssl=false \
        -Dcom.sun.management.jmxremote.authenticate=false
        $FI_BOOT_OPTS"
|
ucare-uchicago/cass-fate-system
|
bin/cassandra.in.sh
|
Shell
|
apache-2.0
| 6,576 |
#!/bin/bash
# Count distinct commits (excluding bot authors/committers) across all repos
# of a project database between two dates.
#   env PG_PASS  - required postgres password
#   env REPOS    - optional manual repo list (overrides DB lookup)
#   $1 - project database name, $2 - date-from, $3 - date-to (YYYY-MM-DD)
# Writes the count to stdout and commits.txt.
# REPOS=... - manually specify repos
if [ -z "$PG_PASS" ]
then
  echo "$0: you need to set PG_PASS=..."
  exit 1
fi
if [ -z "$1" ]
then
  echo "$0: you need to provide 1st argument proect database name: gha, prometheus, cncf, allprj etc."
  exit 2
fi
if [ -z "$2" ]
then
  echo "$0: you need to provide 2nd argument date-from in YYYY-MM-DD format"
  exit 3
fi
if [ -z "$3" ]
then
  echo "$0: you need to provide 3rd date-to in YYYY-MM-DD format"
  exit 4
fi
if [ -z "$REPOS" ]
then
  repos=`db.sh psql "${1}" -tAc "select distinct name from gha_repos"`
else
  repos="${REPOS//[,\']/}"
fi
cwd=`pwd`
log="${cwd}/git.log"
> "${log}"
for repo in $repos
do
  if [[ ! $repo == *"/"* ]]
  then
    echo "malformed repo $repo, skipping"
    continue
  fi
  # Skip repos whose clone is missing. The original `cd ... || echo` kept
  # running in the PREVIOUS repo's directory, so that repo's commits were
  # appended (and counted) twice.
  if ! cd "${HOME}/devstats_repos/$repo" 2>/dev/null
  then
    echo "no $repo repo"
    continue
  fi
  git log --all --pretty=format:"%aE~~~~%cE~~~~%H" --since="${2}" --until="${3}" >> "${log}" 2>/dev/null
  if [ ! "$?" = "0" ]
  then
    echo "problems getting $repo git log"
  else
    echo "" >> "${log}"
  fi
done
cd "${cwd}"
# Drop empty lines, strip quotes/commas, and turn the ~~~~ separators into CSV.
sed -i '/^$/d' "${log}"
vim --not-a-term -c '%s/"//g' -c '%s/,//g' -c '%s/\~\~\~\~/,/g' -c 'wq!' "${log}"
echo "author_email,committer_email,sha" > out
cat "${log}" | sort | uniq >> out && mv out "${log}"
ls -l "${log}"
cp "${log}" /tmp/
# Count distinct SHAs whose author AND committer are not known bots.
bots=`cat ~/dev/go/src/github.com/cncf/devstats/util_sql/only_bots.sql`
commits=`db.sh psql gha -q -c 'create temp table tcom(c text, a text, sha varchar(40))' -c "copy tcom from '/tmp/git.log' with (format csv)" -c "create temp table bots as select distinct email from gha_actors_emails where actor_id in (select id from gha_actors where lower(login) $bots)" -c "select count(distinct sha) from tcom where a not in (select email from bots) and c not in (select email from bots)" -tAc 'drop table bots' -c 'drop table tcom'`
echo "${1}: ${2} - ${3}: ${commits} commits"
echo "$commits" > commits.txt
|
cncf/velocity
|
shells/get_git_commits_count.sh
|
Shell
|
apache-2.0
| 1,898 |
#!/bin/sh
# Generate an ARM template from a parameters YAML and deploy it.
# Usage: deploy_ms.sh <parameters-file-basename> <resource-group> [region]
PARAMETERS_FILE=$1
RESOURCE_GROUP=$2
default='westus2'
# :- (not just -) so an explicitly empty third argument also falls back.
REGION=${3:-$default}
# Fail early with a usage message instead of running az with empty values.
if [ -z "$PARAMETERS_FILE" ] || [ -z "$RESOURCE_GROUP" ]; then
    echo "Usage: $0 <parameters-file-basename> <resource-group> [region]" >&2
    exit 1
fi
#az group deployment create --template-file uniqueString.json --resource-group $RESOURCE_GROUP --output table
python3 deployment_ms.py "parameters/${PARAMETERS_FILE}.yaml"
#az group create --name $RESOURCE_GROUP --location $REGION --output table
az group deployment create -n NonRallyDeployment --mode Incremental --verbose --template-file generatedTemplate.json --parameters @parameters/parameters.json --resource-group "$RESOURCE_GROUP" --output table
|
couchbase-partners/azure-resource-manager-couchbase
|
generator/deploy_ms.sh
|
Shell
|
apache-2.0
| 539 |
#!/bin/sh
# Generate an HTML coverage report: capture lcov data, strip system /
# third-party / generated sources, and render into ./cover.
NAME=mat.cov
lcov --quiet --base-directory . --directory . -c -o $NAME
# The original repeated the same --remove invocation seven times; a loop
# applies the exact same filters in the exact same order.
for pattern in "/usr*" "/build*" "*/ada-util/*" "/opt*" "*/regtests*" "*/adainclude*" "*/b__*"; do
    lcov --quiet --remove $NAME "$pattern" -o $NAME
done
rm -rf cover
genhtml --quiet -o ./cover -t "test coverage" --num-spaces 4 $NAME
|
stcarrez/mat
|
mat/coverage.sh
|
Shell
|
apache-2.0
| 502 |
#!/bin/bash
# ----------------------------------------------------------------------------
#
# Package       : mallet
# Version       : 2.0
# Source repo   : https://github.com/mimno/Mallet.git
# Tested on     : ubuntu_16.04
# Script License: Apache License, Version 2 or later
# Maintainer    : Atul Sowani <[email protected]>
#
# Disclaimer: This script has been tested in non-root mode on given
# ==========  platform using the mentioned version of the package.
#             It may not work as expected with newer versions of the
#             package and/or distribution. In such case, please
#             contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
# NOTE: the shebang must be the very first line of the file; the original
# placed it after the header block, where the kernel treats it as a comment.
# Install dependencies.
sudo apt-get update -y
sudo apt-get install -y git ant openjdk-8-jdk openjdk-8-jre
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-ppc64el
export PATH=$PATH:$JAVA_HOME/bin
# Clone and build source.
git clone https://github.com/mimno/Mallet.git
cd Mallet
ant
|
ppc64le/build-scripts
|
m/mallet/mallet_ubuntu_16.04.sh
|
Shell
|
apache-2.0
| 996 |
#!/bin/bash
# Cron driver for the pool's payout PHP scripts: takes a PID-file lock,
# runs each script in $CRONS sequentially, then releases the lock.
#########################
# #
# Configuration Options #
# #
#########################
# PHP Detections, if this fails hard code it
PHP_BIN=$( which php )
# List of cruns to execute
CRONS="findblock.php proportional_payout.php pplns_payout.php pps_payout.php blockupdate.php payouts.php liquid_payout.php"
# Output additional runtime information
VERBOSE="0"
# Base path for PIDFILE, (full path).
BASEPATH="/tmp"
# Subfolder for PIDFILE, so it's path will be unique in a multipool server.
# Path relative to BASEPATH.
# Eg. SUBFOLDER="LTC"
SUBFOLDER=""
################################################################
# #
# You probably don't need to change anything beyond this point #
# #
################################################################
# Mac OS detection
# (GNU readlink is `greadlink` from coreutils on macOS; -m is a GNU option)
OS=`uname`
case "$OS" in
  Darwin) READLINK=$( which greadlink ) ;;
  *) READLINK=$( which readlink ) ;;
esac
if [[ ! -x $READLINK ]]; then
  echo "readlink not found, please install first";
  exit 1;
fi
# My own name
ME=$( basename $0 )
# Overwrite some settings via command line arguments
while getopts "hfvp:d:" opt; do
  case "$opt" in
    h|\?)
      echo "Usage: $0 [-v] [-p PHP_BINARY] [-d SUBFOLDER]";
      exit 0
      ;;
    v) VERBOSE=1 ;;
    f) PHP_OPTS="$PHP_OPTS -f";;
    p) PHP_BIN=$OPTARG ;;
    d) SUBFOLDER=$OPTARG ;;
    :)
      echo "Option -$OPTARG requires an argument." >&2
      exit 1
      ;;
  esac
done
# Path to PID file, needs to be writable by user running this
PIDFILE="${BASEPATH}/${SUBFOLDER}/${ME}.pid"
# Clean PIDFILE path
PIDFILE=$($READLINK -m "$PIDFILE")
# Create folders recursively if necessary
if ! $(mkdir -p $( dirname $PIDFILE)); then
  echo "Error creating PIDFILE path: $( dirname $PIDFILE )"
  exit 1
fi
# Find scripts path
# (resolve a symlinked invocation back to the real cronjobs folder)
if [[ -L $0 ]]; then
  CRONHOME=$( dirname $( $READLINK $0 ) )
else
  CRONHOME=$( dirname $0 )
fi
# Change working director to CRONHOME
if ! cd $CRONHOME 2>/dev/null; then
  echo "Unable to change to working directory \$CRONHOME: $CRONHOME"
  exit 1
fi
# Confiuration checks
if [[ -z $PHP_BIN || ! -x $PHP_BIN ]]; then
  echo "Unable to locate you php binary."
  exit 1
fi
if [[ ! -e 'shared.inc.php' ]]; then
  echo "Not in cronjobs folder, please ensure \$CRONHOME is set!"
  exit 1
fi
# Our PID of this shell
PID=$$
# Simple PID-file lock: abort if a previous run is alive, or if a stale
# file is left behind (the user must remove it manually — both branches exit).
if [[ -e $PIDFILE ]]; then
  echo "Cron seems to be running already"
  RUNPID=$( cat $PIDFILE )
  if ps fax | grep -q "^\<$RUNPID\>"; then
    echo "Process found in process table, aborting"
    exit 1
  else
    echo "Process $RUNPID not found. Plese remove $PIDFILE if process is indeed dead."
    exit 1
  fi
fi
# Write our PID file
echo $PID > $PIDFILE
for cron in $CRONS; do
  [[ $VERBOSE == 1 ]] && echo "Running $cron, check logfile for details"
  $PHP_BIN $cron $PHP_OPTS
done
# Remove pidfile
rm -f $PIDFILE
|
ZenProductions/CommisionFaucet
|
cronjobs/run-payout.sh
|
Shell
|
apache-2.0
| 2,985 |
#!/bin/bash
#
# Install dependencies on Travis
#
# Inputs:
#   TRAVIS_OS_NAME - "linux" or "osx"
#   BUILD_STATIC   - "true" or "false"
#
# Static builds use scripts to download libarchive, libconfuse, and libsodium,
# so those are only installed on shared library builds.
#
set -e
set -v
# Pins $CONFUSE_VERSION / $LIBSODIUM_VERSION used by the installers below.
source scripts/third_party_versions.sh
MAKE_FLAGS=-j4
# Linux installs into /usr so pkg-config/ld find the libs without extra flags;
# macOS uses the Homebrew-style /usr/local prefix.
if [[ "$TRAVIS_OS_NAME" = "linux" ]]; then
    DEPS_INSTALL_DIR=/usr
else
    DEPS_INSTALL_DIR=/usr/local
fi
# Download the pinned libconfuse release, build it, and install it under
# $DEPS_INSTALL_DIR. Runs in $PWD; leaves the unpacked tree behind.
install_confuse() {
    local pkg=confuse-$CONFUSE_VERSION
    curl -LO https://github.com/martinh/libconfuse/releases/download/v$CONFUSE_VERSION/$pkg.tar.xz
    tar xf $pkg.tar.xz
    pushd $pkg
    ./configure --prefix=$DEPS_INSTALL_DIR --disable-examples
    make $MAKE_FLAGS
    sudo make install
    popd
}
# Download the pinned libsodium release, build it, and install it under
# $DEPS_INSTALL_DIR. Runs in $PWD; leaves the unpacked tree behind.
install_sodium() {
    local pkg=libsodium-$LIBSODIUM_VERSION
    curl -LO https://github.com/jedisct1/libsodium/releases/download/$LIBSODIUM_VERSION/$pkg.tar.gz
    tar xf $pkg.tar.gz
    pushd $pkg
    ./configure --prefix=$DEPS_INSTALL_DIR
    make $MAKE_FLAGS
    sudo make install
    popd
}
# Per-platform, per-$MODE dependency installation.
if [[ "$TRAVIS_OS_NAME" = "linux" ]]; then
    sudo apt-get update -qq
    sudo apt-get install -qq autopoint mtools unzip zip help2man
    case $MODE in
    windows)
        # Cross-compile toolchain; wine to run the produced binaries in CI.
        sudo dpkg --add-architecture i386
        sudo apt-get update
        sudo apt-get install -qq gcc-mingw-w64-x86-64 wine
        ;;
    singlethread|dynamic)
        sudo apt-get install -qq libarchive-dev
        install_confuse
        install_sodium
        pip install --user cpp-coveralls
        ;;
    static)
        # Need fpm when building static so that we can make the .deb and .rpm packages
        sudo apt-get install -qq rpm
        gem install fpm
        ;;
    raspberrypi)
        # qemu + binfmt lets the ARM binaries run on the x86 CI host.
        sudo apt-get install -qq libarchive-dev qemu binfmt-support qemu-user-static
        gem install fpm
        pushd ~
        git clone https://github.com/raspberrypi/tools.git --depth 1
        popd
        ;;
    esac
else
    # OSX
    brew update
    # Fix "/usr/local/Library/ENV/4.3/sed: Not such file" errors
    brew uninstall libtool
    brew install libtool
    brew install mtools
    brew install gettext
    if [[ "$MODE" = "dynamic" ]]; then
        brew install libarchive libsodium confuse
    fi
    # Fix brew breakage in autotools
    mkdir -p /usr/local/Library/ENV
    ln -s /usr/local/Library/Homebrew/shims/super /usr/local/Library/ENV/4.3
    ls /usr/local/Library/ENV/4.3
fi
|
michaelkschmidt/fwup
|
scripts/ci_install_deps.sh
|
Shell
|
apache-2.0
| 2,591 |
#!/bin/sh
# Install the Rust CLI tooling this generator depends on (currently only
# `loc`, a lines-of-code counter).
# The echoed banner below says "### starting Rust crate installation..."
echo "### 开始安装 Rust 库..."
cargo install \
loc
# Banner: "installation finished..."
echo "结束安装..."
|
huso-io/generator-node-fullstack
|
generators/app/templates/tool/cargo.install.sh
|
Shell
|
apache-2.0
| 94 |
#!/bin/bash
# Download and unpack the Hadoop distribution matching $HADOOP_MAJOR_VERSION
# into /root/ephemeral-hdfs, then rsync it to the cluster.
# NOTE(review): the top-level `return` statements only work when this file is
# *sourced* (as spark-ec2 setup scripts are) — confirm before running directly.
pushd /root > /dev/null
if [ -d "ephemeral-hdfs" ]; then
  echo "Ephemeral HDFS seems to be installed. Exiting."
  return 0
fi
case "$HADOOP_MAJOR_VERSION" in
  1)
    wget http://s3.amazonaws.com/spark-related-packages/hadoop-2.7.0.tar.gz
    echo "Unpacking Hadoop"
    tar xvzf hadoop-2.7.0.tar.gz > /tmp/spark-ec2_hadoop.log
    rm hadoop-*.tar.gz
    mv hadoop-2.7.0/ ephemeral-hdfs/
    # Drop the unsupported "-jvm server" flag from the launcher.
    sed -i 's/-jvm server/-server/g' /root/ephemeral-hdfs/bin/hadoop
    cp /root/hadoop-native/* /root/ephemeral-hdfs/lib/native/
    ;;
  2)
    wget http://s3.amazonaws.com/spark-related-packages/hadoop-2.0.0-cdh4.2.0.tar.gz
    echo "Unpacking Hadoop"
    tar xvzf hadoop-*.tar.gz > /tmp/spark-ec2_hadoop.log
    rm hadoop-*.tar.gz
    mv hadoop-2.0.0-cdh4.2.0/ ephemeral-hdfs/
    # Have single conf dir
    rm -rf /root/ephemeral-hdfs/etc/hadoop/
    ln -s /root/ephemeral-hdfs/conf /root/ephemeral-hdfs/etc/hadoop
    cp /root/hadoop-native/* /root/ephemeral-hdfs/lib/native/
    ;;
  yarn)
    wget http://s3.amazonaws.com/spark-related-packages/hadoop-2.4.0.tar.gz
    echo "Unpacking Hadoop"
    tar xvzf hadoop-*.tar.gz > /tmp/spark-ec2_hadoop.log
    rm hadoop-*.tar.gz
    mv hadoop-2.4.0/ ephemeral-hdfs/
    # Have single conf dir
    rm -rf /root/ephemeral-hdfs/etc/hadoop/
    ln -s /root/ephemeral-hdfs/conf /root/ephemeral-hdfs/etc/hadoop
    ;;
  *)
     echo "ERROR: Unknown Hadoop version"
     return 1
esac
# Push the unpacked tree to every slave node.
/root/spark-ec2/copy-dir /root/ephemeral-hdfs
popd > /dev/null
|
madhavhugar/spark-ec2
|
ephemeral-hdfs/init.sh
|
Shell
|
apache-2.0
| 1,505 |
#!/usr/bin/env bash
# so that CI script will fail when e.g. previous command succeeds:
# (call right after a command that is EXPECTED to fail; if that command
# actually succeeded — exit status 0 — abort the whole build with status 1)
fail_on_success()
{
  exit_status=$?
  if (( exit_status != 0 )); then
    return 0
  fi
  echo "Build failed due to last exit status being $exit_status"
  exit 1
}
|
NifTK/NiftyNet
|
ci/utils.sh
|
Shell
|
apache-2.0
| 271 |
#!/usr/bin/env bash
# This is used to run an application using qemu and then check if
# the output contains 'ERROR: expr' which is output by AC_TEST on
# errors. The number of occurances of 'ERROR: expr' becomes the exit
# code. Thus if no errors then the exit code it 0. I needed to do this
# because I couldn't find a way to have qemu exit with a error code
# coming from the application being run.
#
# Parameters:
#   $1 is the file to execute; remaining args are passed to qemu verbatim
echo "param1=$1"
param1_filename=$1
shift
tmpfile=$(mktemp /tmp/sadie-qemu-system-x86_64.runner.XXX) || exit 1
# "$@" and the quoted file name keep arguments intact even when they contain
# spaces (the original unquoted $@/$param1_filename re-split them).
qemu-system-x86_64 "$@" -nographic -no-reboot -drive format=raw,file="$param1_filename" </dev/null 2>&1 | tee "${tmpfile}"
# NOTE(review): this pattern matches the literal text "ERROR:+: ", not
# "ERROR: expr" as the header comment says — kept byte-identical; confirm
# against AC_TEST's actual output format before changing.
err_count=$(grep -c -e 'ERROR:+: ' "${tmpfile}")
#echo err_count=${err_count}
# Keep the log around for inspection when errors were found.
((${err_count} == 0)) && rm "${tmpfile}"
exit ${err_count}
|
winksaville/sadie
|
tools/qemu-system-x86_64.runner.sh
|
Shell
|
apache-2.0
| 798 |
#!/bin/bash
# Copyright 2016 - 2020 Crunchy Data Solutions, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Create a pgbench database on the target server, initialize it at scale 5,
# then run a short 10-second / 15-client load against it.
source /opt/cpm/bin/common_lib.sh
#export PGPASSFILE=/tmp/pgpass
#
# $1 is the HOSTNAME
# $2 is the PG PORT
# $3 is the PG USER
echo_info "Initializing pgbench database.."
psql -p $2 -h $1 -c 'create database pgbench' -U $3 postgres
pgbench --host=$1 \
	--port=$2 \
	--username=$3 \
	--scale=5 \
	--initialize pgbench
echo_info "Adding some load.."
pgbench --host=$1 \
	--port=$2 \
	--username=$3 \
	--time=10 \
	--client=15 \
	pgbench
the1forte/crunchy-containers
|
bin/bench/run-benchmark.sh
|
Shell
|
apache-2.0
| 1,046 |
#!/usr/bin/env bash
# Regenerate all go:generate artifacts (mocks) for every package in the module.
set -e
# $(go list ./...) is intentionally unquoted: each package path must become
# a separate argument to `go generate`.
go generate $(go list ./...)
|
fragments/fragments
|
scripts/genmocks.sh
|
Shell
|
apache-2.0
| 58 |
#!/usr/bin/env bash
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: the shebang must be the first line of the file; the original placed
# it after the license header, where the kernel treats it as a comment.
# Helper to build release artifacts - binaries only.
#
# Usage (from repo root):
#   ./build/build-binaries.sh -o . -v v0.1.0
# Collects args and defines build functions.
source "$(dirname "${0}")/build-include.sh"
# Build binaries only.
build_binaries
|
GoogleCloudPlatform/solution-acceleration-toolkit
|
build/build-binaries.sh
|
Shell
|
apache-2.0
| 850 |
#!/bin/sh
# Build both variants of the sample app ("app" = completed, "initial" =
# starting point) with Maven and Gradle, aborting on the first failure and
# removing build output directories after each successful step.
cd $(dirname $0)

# run_step <output-dir> <command...>
# Runs the build command; exits with its status on failure, otherwise
# removes the given output directory.
run_step() {
    step_out_dir=$1
    shift
    "$@"
    ret=$?
    if [ $ret -ne 0 ]; then
        exit $ret
    fi
    rm -rf "$step_out_dir"
}

cd ../app
run_step target mvn clean package
run_step build ./gradlew build
cd ../initial
run_step target mvn clean compile
run_step build ./gradlew compileJava
exit
|
allemaos/template-microservices-app
|
test/run.sh
|
Shell
|
apache-2.0
| 375 |
#!/bin/bash
# ----------------------------------------------------------------------------
#
# Package       : reflections
# Version       : 0.9.11
# Source repo   : https://github.com/ronmamo/reflections
# Tested on     : rhel_7.3
# Script License: Apache License, Version 2 or later
# Maintainer    : Atul Sowani <[email protected]>
#
# Disclaimer: This script has been tested in non-root mode on given
# ==========  platform using the mentioned version of the package.
#             It may not work as expected with newer versions of the
#             package and/or distribution. In such case, please
#             contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
# NOTE: the shebang must be the very first line of the file; the original
# placed it after the header block, where the kernel treats it as a comment.
# Install dependencies.
sudo yum update -y
sudo yum install -y git gcc wget make python java-1.7.0-openjdk-devel.ppc64le tar
export JAVA_HOME=/usr/lib/jvm/java-1.7.0-openjdk
export PATH=$JAVA_HOME/bin:$PATH
# Install maven.
wget http://archive.apache.org/dist/maven/maven-3/3.3.3/binaries/apache-maven-3.3.3-bin.tar.gz
tar -zxf apache-maven-3.3.3-bin.tar.gz
sudo cp -R apache-maven-3.3.3 /usr/local
sudo ln -s /usr/local/apache-maven-3.3.3/bin/mvn /usr/bin/mvn
# Clone and build source code.
git clone https://github.com/ronmamo/reflections
cd reflections
mvn dependency:list -DexcludeTransitive
mvn -DskipTests package
mvn test -fn
|
ppc64le/build-scripts
|
r/reflections/reflections_rhel_7.3.sh
|
Shell
|
apache-2.0
| 1,355 |
#!/bin/bash
#
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Tests the examples provided in Bazel
#
# Load the test setup defined in the parent directory
CURRENT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${CURRENT_DIR}/../../integration_test_setup.sh" \
  || { echo "integration_test_setup.sh not found!" >&2; exit 1; }
# These tests drive Xcode toolchains, so they are skipped (exit 0, i.e. not
# a failure) on anything that is not macOS.
if [ "${PLATFORM}" != "darwin" ]; then
  echo "This test suite requires running on OS X" >&2
  exit 0
fi
# Per-test setup: stage the example workspaces, enable Objective-C test
# support, and detect an installed Xcode version to pass to bazel.
function set_up() {
copy_examples
setup_objc_test_support
# Find the version number for an installed 7-, 8-, or 9-series Xcode
# (any sub-version will do) by querying the autodetected
# @local_config_xcode repository.
bazel query "labels('versions', '@local_config_xcode//:host_xcodes')" \
--output xml | grep 'name="version"' \
| sed -E 's/.*(value=\"(([0-9]|.)+))\".*/\2/' > xcode_versions
XCODE_VERSION=$(cat xcode_versions | grep -m1 '7\|8\|9')
# Allow access to //external:xcrunwrapper.
use_bazel_workspace_file
}
# Builds an apple_binary (with no srcs of its own, only objc_library
# deps) for two CPUs and verifies via `lipo -info` that the output is a
# fat binary containing both i386 and x86_64 slices.
function test_fat_binary_no_srcs() {
mkdir -p package
cat > package/BUILD <<EOF
objc_library(
name = "lib_a",
srcs = ["a.m"],
)
objc_library(
name = "lib_b",
srcs = ["b.m"],
)
apple_binary(
name = "main_binary",
deps = [":lib_a", ":lib_b"],
platform_type = "ios",
minimum_os_version = "10.0",
)
genrule(
name = "lipo_run",
srcs = [":main_binary_lipobin"],
outs = ["lipo_out"],
cmd =
"set -e && " +
"lipo -info \$(location :main_binary_lipobin) > \$(@)",
tags = ["requires-darwin"],
)
EOF
touch package/a.m
cat > package/b.m <<EOF
int main() {
return 0;
}
EOF
# Build the genrule (which runs lipo on the produced binary) for two
# CPUs, then check both architectures are reported.
bazel build --verbose_failures --xcode_version=$XCODE_VERSION \
//package:lipo_out --ios_multi_cpus=i386,x86_64 \
|| fail "should build apple_binary and obtain info via lipo"
cat bazel-genfiles/package/lipo_out | grep "i386 x86_64" \
|| fail "expected output binary to contain 2 architectures"
}
# Verifies that the autodetected @local_config_xcode repository matches
# the host's actual Xcode installation: the xcode_version target must
# carry the version and default iOS/macOS SDK versions that
# `xcodebuild -version` reports, and must be the default.
function test_host_xcodes() {
XCODE_VERSION=$(env -i xcodebuild -version | grep "Xcode" \
| sed -E "s/Xcode (([0-9]|.)+).*/\1/")
IOS_SDK=$(env -i xcodebuild -version -sdk | grep iphoneos \
| sed -E "s/.*\(iphoneos(([0-9]|.)+)\).*/\1/")
MACOSX_SDK=$(env -i xcodebuild -version -sdk | grep macosx \
| sed -E "s/.*\(macosx(([0-9]|.)+)\).*/\1/" | head -n 1)
# Unfortunately xcodebuild -version doesn't always pad with trailing .0, so,
# for example, may produce "6.4", which is bad for this test.
# BUG FIX: the pattern previously used unescaped dots
# ([0-9].[0-9].[0-9]), which match ANY character, so strings such as
# "6a4b7" were wrongly accepted as already-padded versions. Escape the
# dots and allow multi-digit components.
if [[ ! $XCODE_VERSION =~ [0-9]+\.[0-9]+\.[0-9]+ ]]
then
XCODE_VERSION="${XCODE_VERSION}.0"
fi
bazel build @local_config_xcode//:host_xcodes >"${TEST_log}" 2>&1 \
|| fail "Expected host_xcodes to build"
bazel query "attr(version, $XCODE_VERSION, \
attr(default_ios_sdk_version, $IOS_SDK, \
attr(default_macos_sdk_version, $MACOSX_SDK, \
labels('versions', '@local_config_xcode//:host_xcodes'))))" \
> xcode_version_target
assert_contains "local_config_xcode" xcode_version_target
DEFAULT_LABEL=$(bazel query \
"labels('default', '@local_config_xcode//:host_xcodes')")
assert_equals $DEFAULT_LABEL $(cat xcode_version_target)
}
# Builds an apple_binary for iOS through the Apple crosstool transition
# (objc_library deps plus a cc_library dep) and verifies via `lipo -info`
# that the output contains both requested architectures.
function test_apple_binary_crosstool_ios() {
rm -rf package
mkdir -p package
cat > package/BUILD <<EOF
objc_library(
name = "lib_a",
srcs = ["a.m"],
)
objc_library(
name = "lib_b",
srcs = ["b.m"],
deps = [":cc_lib"],
)
cc_library(
name = "cc_lib",
srcs = ["cc_lib.cc"],
)
apple_binary(
name = "main_binary",
deps = [":main_lib"],
platform_type = "ios",
minimum_os_version = "10.0",
)
objc_library(
name = "main_lib",
deps = [":lib_a", ":lib_b"],
srcs = ["main.m"],
)
genrule(
name = "lipo_run",
srcs = [":main_binary_lipobin"],
outs = ["lipo_out"],
cmd =
"set -e && " +
"lipo -info \$(location :main_binary_lipobin) > \$(@)",
tags = ["requires-darwin"],
)
EOF
touch package/a.m
touch package/b.m
cat > package/main.m <<EOF
int main() {
return 0;
}
EOF
cat > package/cc_lib.cc << EOF
#include <string>
std::string GetString() { return "h3ll0"; }
EOF
bazel build --verbose_failures //package:lipo_out \
--apple_crosstool_transition \
--ios_multi_cpus=i386,x86_64 \
--xcode_version=$XCODE_VERSION \
|| fail "should build apple_binary and obtain info via lipo"
# BUG FIX: the failure message claimed only "x86_64 architecture" while
# the assertion actually requires both i386 and x86_64 slices.
cat bazel-genfiles/package/lipo_out | grep "i386 x86_64" \
|| fail "expected output binary to contain both i386 and x86_64 architectures"
}
# Builds an apple_binary with platform_type = "watchos" and verifies via
# lipo that all dependencies (including a transitive cc_library) are
# compiled for the armv7k watch architecture. The sources reference
# WKExtensionDelegate, which exists only in the Watch SDK, so the build
# itself proves the deps were compiled against that SDK.
function test_apple_binary_crosstool_watchos() {
rm -rf package
mkdir -p package
cat > package/BUILD <<EOF
genrule(
name = "lipo_run",
srcs = [":main_binary_lipobin"],
outs = ["lipo_out"],
cmd =
"set -e && " +
"lipo -info \$(location :main_binary_lipobin) > \$(@)",
tags = ["requires-darwin"],
)
apple_binary(
name = "main_binary",
deps = [":main_lib"],
platform_type = "watchos",
)
objc_library(
name = "main_lib",
srcs = ["main.m"],
deps = [":lib_a"],
)
cc_library(
name = "cc_lib",
srcs = ["cc_lib.cc"],
)
# By depending on a library which requires it is built for watchos,
# this test verifies that dependencies of apple_binary are compiled
# for the specified platform_type.
objc_library(
name = "lib_a",
srcs = ["a.m"],
deps = [":cc_lib"],
)
EOF
cat > package/main.m <<EOF
#import <WatchKit/WatchKit.h>
// Note that WKExtensionDelegate is only available in Watch SDK.
@interface TestInterfaceMain : NSObject <WKExtensionDelegate>
@end
int main() {
return 0;
}
EOF
cat > package/a.m <<EOF
#import <WatchKit/WatchKit.h>
// Note that WKExtensionDelegate is only available in Watch SDK.
@interface TestInterfaceA : NSObject <WKExtensionDelegate>
@end
int aFunction() {
return 0;
}
EOF
cat > package/cc_lib.cc << EOF
#include <string>
std::string GetString() { return "h3ll0"; }
EOF
# Build for the watch CPU and confirm the produced binary's slice.
bazel build --verbose_failures //package:lipo_out \
--apple_crosstool_transition \
--watchos_cpus=armv7k \
--xcode_version=$XCODE_VERSION \
|| fail "should build watch binary"
cat bazel-genfiles/package/lipo_out | grep "armv7k" \
|| fail "expected output binary to be for armv7k architecture"
}
# Verifies that config_setting can match on the Xcode version and iOS
# SDK version flags derived from --xcode_version_config/--xcode_version:
# three xcode_version targets are declared and two genrules select() on
# them, so the genrule outputs reveal which configuration was active.
function test_xcode_config_select() {
mkdir -p a
cat > a/BUILD <<'EOF'
xcode_config(
name = "xcodes",
default = ":version10",
versions = [ ":version10", ":version20", ":version30" ],
visibility = ["//visibility:public"],
)
xcode_version(
name = "version10",
default_ios_sdk_version = "1.1",
default_macos_sdk_version = "1.2",
default_tvos_sdk_version = "1.3",
default_watchos_sdk_version = "1.4",
version = "1.0",
)
xcode_version(
name = "version20",
default_ios_sdk_version = "2.1",
default_macos_sdk_version = "2.2",
default_tvos_sdk_version = "2.3",
default_watchos_sdk_version = "2.4",
version = "2.0",
)
xcode_version(
name = "version30",
default_ios_sdk_version = "3.1",
default_macos_sdk_version = "3.2",
default_tvos_sdk_version = "3.3",
default_watchos_sdk_version = "3.4",
version = "3.0",
)
config_setting(
name = "xcode10",
flag_values = { "@bazel_tools//tools/osx:xcode_version_flag": "1.0" },
)
config_setting(
name = "xcode20",
flag_values = { "@bazel_tools//tools/osx:xcode_version_flag": "2.0" },
)
config_setting(
name = "ios11",
flag_values = { "@bazel_tools//tools/osx:ios_sdk_version_flag": "1.1" },
)
config_setting(
name = "ios21",
flag_values = { "@bazel_tools//tools/osx:ios_sdk_version_flag": "2.1" },
)
genrule(
name = "xcode",
srcs = [],
outs = ["xcodeo"],
cmd = "echo " + select({
":xcode10": "XCODE 1.0",
":xcode20": "XCODE 2.0",
"//conditions:default": "XCODE UNKNOWN",
}) + " >$@",)
genrule(
name = "ios",
srcs = [],
outs = ["ioso"],
cmd = "echo " + select({
":ios11": "IOS 1.1",
":ios21": "IOS 2.1",
"//conditions:default": "IOS UNKNOWN",
}) + " >$@",)
EOF
# No --xcode_version: the config's default (1.0) and its default iOS
# SDK (1.1) must be selected.
bazel build //a:xcode //a:ios --xcode_version_config=//a:xcodes || fail "build failed"
assert_contains "XCODE 1.0" bazel-genfiles/a/xcodeo
assert_contains "IOS 1.1" bazel-genfiles/a/ioso
# Explicit --xcode_version=2.0 selects the 2.x branches.
bazel build //a:xcode //a:ios --xcode_version_config=//a:xcodes \
--xcode_version=2.0 || fail "build failed"
assert_contains "XCODE 2.0" bazel-genfiles/a/xcodeo
assert_contains "IOS 2.1" bazel-genfiles/a/ioso
# 3.0 exists but has no matching config_setting, so both selects fall
# through to //conditions:default.
bazel build //a:xcode //a:ios --xcode_version_config=//a:xcodes \
--xcode_version=3.0 || fail "build failed"
assert_contains "XCODE UNKNOWN" bazel-genfiles/a/xcodeo
assert_contains "IOS UNKNOWN" bazel-genfiles/a/ioso
}
run_suite "apple_tests"
|
spxtr/bazel
|
src/test/shell/bazel/apple/bazel_apple_test.sh
|
Shell
|
apache-2.0
| 9,120 |
#
# Shell script to clean old reports
#
# - Deletes report files older than 10 minutes (find -cmin +10) in all
#   report subdirectories of the current dir.
#   NOTE(review): the original header said "older than 1 hour", but the
#   find predicate has always been -cmin +10 (10 minutes); the observed
#   behavior is kept and the comment corrected.
# - It can be scheduled as a crontab job:
#     $ crontab -e
#     Add: 0 * * * * /home/ersin/Library/coreos-vagrant/clean_cron.sh
#   for hourly activation
#

# Report directory-name prefixes to clean (one glob pass per type).
report_types="uptime hostnamectl diskstats loadavg meminfo stat df"

# Remove files older than 10 minutes from every matching report
# directory under www/data/<host>/. Replaces the original's seven
# copy-pasted `ls -d ... | for` pipelines with one loop, and avoids
# parsing `ls` output (whitespace-safe).
clean_reports() {
    t=""
    for t in $report_types; do
        for d in www/data/*/"$t"*; do
            # An unmatched glob stays literal; skip it instead of
            # handing a nonexistent path to find.
            [ -e "$d" ] || continue
            find "$d" -type f -cmin +10 -delete
        done
    done
}

clean_reports
|
ersinesen/primon
|
utils/cleaner_cron.sh
|
Shell
|
apache-2.0
| 864 |
#!/bin/bash
#
# Script by Aiman Amir
# Telegram : @NamiaKai
# ==================================================
#
# All-in-one Debian 7 VPS tunneling setup: OpenSSH (22/80), Dropbear
# (443/143), Squid3, OpenVPN (TCP 1194), badvpn-udpgw, nginx (81),
# webmin (10000), fail2ban, plus admin helper scripts, then prints and
# logs a summary to /root/log-install.txt.
# NOTE(review): downloads and runs third-party payloads over plain
# http/raw.github.com, opens a reboot-every-12h cron, and wipes shell
# history at the end -- audit before running on any machine you care
# about. Must run as root on a fresh Debian 7 box.
# Install CA certificates.
apt-get install ca-certificates
# Initialize variables: detected arch, public IPv4, and a sed program
# used later to inject the IP into config templates.
export DEBIAN_FRONTEND=noninteractive
OS=`uname -m`;
MYIP=$(wget -qO- ipv4.icanhazip.com);
MYIP2="s/xxxxxxxxx/$MYIP/g";
# Regenerate SSH host keys.
rm -r /etc/ssh*key
dpkg-reconfigure openssh-server
# Go to root's home directory.
cd
# Disable IPv6 now and on every boot via rc.local.
echo 1 > /proc/sys/net/ipv6/conf/all/disable_ipv6
sed -i '$ i\echo 1 > /proc/sys/net/ipv6/conf/all/disable_ipv6' /etc/rc.local
# install wget and curl
apt-get update;apt-get -y install wget curl;
# set time GMT +8
ln -fs /usr/share/zoneinfo/Asia/Malaysia /etc/localtime
# set locale
sed -i 's/AcceptEnv/#AcceptEnv/g' /etc/ssh/sshd_config
service ssh restart
# Set APT repositories and import the dotdeb/webmin signing keys.
wget -O /etc/apt/sources.list "https://raw.github.com/blazevpn/autoscript/master/sources.list.debian7"
wget "http://www.dotdeb.org/dotdeb.gpg"
wget "http://www.webmin.com/jcameron-key.asc"
cat dotdeb.gpg | apt-key add -;rm dotdeb.gpg
cat jcameron-key.asc | apt-key add -;rm jcameron-key.asc
# update
apt-get update
# install webserver
apt-get -y install nginx
# install essential package
apt-get -y install bmon iftop htop nmap axel nano iptables traceroute sysv-rc-conf dnsutils bc nethogs openvpn vnstat less screen psmisc apt-file whois ptunnel ngrep mtr git zsh mrtg snmp snmpd snmp-mibs-downloader unzip unrar rsyslog debsums rkhunter
apt-get -y install build-essential
# disable exim
service exim4 stop
sysv-rc-conf exim4 off
# update apt-file
apt-file update
# Install figlet and set up a login banner in root's .bashrc.
apt-get install figlet
echo "clear" >> .bashrc
echo 'figlet -k "$HOSTNAME"' >> .bashrc
echo 'echo -e "Selamat datang ke server $HOSTNAME"' >> .bashrc
echo 'echo -e "Script mod by Aiman Amir"' >> .bashrc
echo 'echo -e "Taip menu untuk menampilkan senarai perintah yang tersedia"' >> .bashrc
echo 'echo -e ""' >> .bashrc
# Configure the nginx web server (serves /home/vps/public_html on 81).
cd
rm /etc/nginx/sites-enabled/default
rm /etc/nginx/sites-available/default
wget -O /etc/nginx/nginx.conf "https://raw.github.com/blazevpn/autoscript/master/nginx.conf"
mkdir -p /home/vps/public_html
echo "<pre>Setup by Aiman Amir | 081515292117</pre>" > /home/vps/public_html/index.html
wget -O /etc/nginx/conf.d/vps.conf "https://github.com/blazevpn/autoscript/blob/master/vps.conf"
service nginx restart
# Install OpenVPN (prebuilt keys/config tarball) and enable NAT.
wget -O /etc/openvpn/openvpn.tar "https://raw.github.com/blazevpn/autoscript/master/openvpn-debian.tar"
cd /etc/openvpn/
tar xf openvpn.tar
wget -O /etc/openvpn/1194.conf "https://raw.github.com/blazevpn/autoscript/master/1194.conf"
service openvpn restart
sysctl -w net.ipv4.ip_forward=1
sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/g' /etc/sysctl.conf
iptables -t nat -I POSTROUTING -s 192.168.100.0/24 -o eth0 -j MASQUERADE
iptables-save > /etc/iptables_yg_baru_dibikin.conf
wget -O /etc/network/if-up.d/iptables "https://raw.github.com/blazevpn/autoscript/master/iptables"
chmod +x /etc/network/if-up.d/iptables
service openvpn restart
# Configure the OpenVPN client profile: inject this server's IP and
# publish it for download on the web server.
cd /etc/openvpn/
wget -O /etc/openvpn/client.ovpn "https://raw.github.com/blazevpn/autoscript/master/client-1194.conf"
sed -i $MYIP2 /etc/openvpn/client.ovpn;
cp client.ovpn /home/vps/public_html/
cd
# Install badvpn-udpgw (UDP-over-SSH gateway) and start it in a screen,
# also on every boot via rc.local.
wget -O /usr/bin/badvpn-udpgw "https://raw.github.com/blazevpn/autoscript/master/badvpn-udpgw"
if [ "$OS" == "x86_64" ]; then
wget -O /usr/bin/badvpn-udpgw "https://raw.github.com/blazevpn/autoscript/master/badvpn-udpgw"
fi
sed -i '$ i\screen -AmdS badvpn badvpn-udpgw --listen-addr 127.0.0.1:7300' /etc/rc.local
chmod +x /usr/bin/badvpn-udpgw
screen -AmdS badvpn badvpn-udpgw --listen-addr 127.0.0.1:7300
cd
# Configure SSH ports: keep 22 and add 80.
sed -i 's/Port 22/Port 22/g' /etc/ssh/sshd_config
sed -i '/Port 22/a Port 80' /etc/ssh/sshd_config
service ssh restart
# Install Dropbear on 443 and 143.
apt-get -y install dropbear
sed -i 's/NO_START=1/NO_START=0/g' /etc/default/dropbear
sed -i 's/DROPBEAR_PORT=22/DROPBEAR_PORT=443/g' /etc/default/dropbear
sed -i 's/DROPBEAR_EXTRA_ARGS=/DROPBEAR_EXTRA_ARGS="-p 443 -p 143"/g' /etc/default/dropbear
echo "/bin/false" >> /etc/shells
echo "/usr/sbin/nologin" >> /etc/shells
service ssh restart
service dropbear restart
cd
# install fail2ban
apt-get -y install fail2ban;service fail2ban restart
# Install Squid3 proxy with the server IP injected into its config.
apt-get -y install squid3
wget -O /etc/squid3/squid.conf "https://raw.github.com/blazevpn/autoscript/master/squid3.conf"
sed -i $MYIP2 /etc/squid3/squid.conf;
service squid3 restart
# install webmin
cd
wget -O webmin-current.deb "http://www.webmin.com/download/deb/webmin-current.deb"
dpkg -i --force-all webmin-current.deb;
apt-get -y -f install;
rm /root/webmin-current.deb
service webmin restart
# Download admin helper scripts into /usr/bin and schedule the
# 12-hourly reboot plus a per-minute dropbear restart cron.
cd /usr/bin
wget -O menu "https://raw.github.com/blazevpn/autoscript/master/menu.sh"
wget -O usernew "https://raw.github.com/blazevpn/autoscript/master/usernew.sh"
wget -O trial "https://raw.github.com/blazevpn/autoscript/master/trial.sh"
wget -O hapus "https://raw.github.com/blazevpn/autoscript/master/hapus.sh"
wget -O login "https://raw.github.com/blazevpn/autoscript/master/user-login.sh"
wget -O member "https://raw.github.com/blazevpn/autoscript/master/user-list.sh"
wget -O resvis "https://raw.github.com/blazevpn/autoscript/master/resvis.sh"
wget -O speedtest "https://raw.github.com/blazevpn/autoscript/master/speedtest_cli.py"
wget -O about "https://raw.github.com/blazevpn/autoscript/master/about.sh"
echo "0 0 * * * root /usr/bin/reboot" > /etc/cron.d/reboot
echo "* * * * * service dropbear restart" > /etc/cron.d/dropbear
chmod +x menu
chmod +x usernew
chmod +x trial
chmod +x hapus
chmod +x login
chmod +x member
chmod +x resvis
chmod +x speedtest
chmod +x about
# Finishing touches: fix ownership, restart everything, scrub history.
cd
chown -R www-data:www-data /home/vps/public_html
service nginx start
service openvpn restart
service cron restart
service ssh restart
service dropbear restart
service squid3 restart
service webmin restart
rm -rf ~/.bash_history && history -c
echo "unset HISTFILE" >> /etc/profile
# Print the install summary (also appended to /root/log-install.txt).
clear
echo "Autoscript Include:" | tee log-install.txt
echo "===========================================" | tee -a log-install.txt
echo "" | tee -a log-install.txt
echo "Service" | tee -a log-install.txt
echo "-------" | tee -a log-install.txt
echo "OpenSSH : 22, 80" | tee -a log-install.txt
echo "Dropbear : 443, 143" | tee -a log-install.txt
echo "Squid3 : 8080, 3128 (limit to IP SSH)" | tee -a log-install.txt
echo "OpenVPN : TCP 1194 (client config : http://$MYIP:81/client.ovpn)" | tee -a log-install.txt
echo "badvpn : badvpn-udpgw port 7300" | tee -a log-install.txt
echo "nginx : 81" | tee -a log-install.txt
echo "" | tee -a log-install.txt
echo "Script" | tee -a log-install.txt
echo "------" | tee -a log-install.txt
echo "menu (Senarai perintah)" | tee -a log-install.txt
echo "usernew (Membuat Akaun SSH)" | tee -a log-install.txt
echo "trial (Membuat Akaun Trial)" | tee -a log-install.txt
echo "hapus (Menghapus Akaun SSH)" | tee -a log-install.txt
echo "login (Semak login user)" | tee -a log-install.txt
echo "member (Senarai user)" | tee -a log-install.txt
echo "resvis (Restart Service dropbear, webmin, squid3, openvpn dan ssh)" | tee -a log-install.txt
echo "reboot (Reboot VPS)" | tee -a log-install.txt
echo "speedtest (Speedtest VPS)" | tee -a log-install.txt
echo "about (Informasi tentang script auto install)" | tee -a log-install.txt
echo "" | tee -a log-install.txt
echo "Feature lain" | tee -a log-install.txt
echo "----------" | tee -a log-install.txt
echo "Webmin : http://$MYIP:10000/" | tee -a log-install.txt
echo "Timezone : Asia/Malaysia (GMT +8)" | tee -a log-install.txt
echo "IPv6 : [off]" | tee -a log-install.txt
echo "" | tee -a log-install.txt
echo "Modified by Aiman Amir" | tee -a log-install.txt
echo "" | tee -a log-install.txt
echo "Log Instalasi --> /root/log-install.txt" | tee -a log-install.txt
echo "" | tee -a log-install.txt
echo "VPS AUTO REBOOT TIAP 12 JAM" | tee -a log-install.txt
echo "" | tee -a log-install.txt
echo "===========================================" | tee -a log-install.txt
cd
rm -f /root/debian7.sh.x
|
blazevpn/autoscript
|
debian7.sh
|
Shell
|
apache-2.0
| 8,097 |
#!/bin/sh
# Launch the Node HTTP server in the background, detached from the
# terminal; stdout/stderr are discarded. No PID file is kept, so stop
# it via ps/pkill.
nohup node http-server.js >/dev/null 2>&1 &
|
saxman/maps-js-icoads
|
http-server.sh
|
Shell
|
apache-2.0
| 56 |
#!/bin/bash
# File: ~/bin/apkpackage
# Purpose: print the package name of every .apk under the current
# directory on Linux, without unpacking the archives.
# Requires the Android SDK build tool `aapt` on PATH.
currentpath=${PWD}
echo "${currentpath}"
# For each apk, dump the binary AndroidManifest.xml with aapt and pull
# out the value of the package= attribute (second "-delimited field).
# Changed from `... | xargs -i aapt ...`: -i is deprecated, and piping
# file names through xargs broke on paths containing spaces; -exec
# passes each path as a single argument.
find "${currentpath}" -type f -name "*.apk" -exec aapt dump xmltree {} AndroidManifest.xml \; | sed -n "/package=/p" | awk -F'"' '{print $2}'
exit 0
|
kuangzt/Practice4Shell
|
examinepackagename.sh
|
Shell
|
apache-2.0
| 314 |
#! /bin/bash
# Interactively bump the aDTN platform/library/tools release numbers,
# then build and (temporarily) install the three .deb packages for each
# architecture in $ARCHILIST. Uses sudo for all dpkg operations and
# persists version bumps back into configs/create_deb.conf.
cd `dirname $0`
CURRPWD=`pwd`
CONFIG_FILE=configs/create_deb.conf
# Load build configuration (versions, release numbers, arch list, paths).
if [[ -f $CONFIG_FILE ]]; then
. $CONFIG_FILE
else
exit 1
fi
echo -e "\e[92m"
read -p "Do you want to increment the adtn version (${RELEPLAT})? (y/N)" releplat
releplat=${releplat,,} #var to lower case
if [[ $releplat =~ (yes|y)$ ]]; then
sed -i "s/RELEPLAT=\"${RELEPLAT}\"/RELEPLAT=\"$((${RELEPLAT}+1))\"/g" $CONFIG_FILE
fi
read -p "Do you want to increment the library version (${RELELIB})? (y/N)" relelib
relelib=${relelib,,} #var to lower case
if [[ $relelib =~ (yes|y)$ ]]; then
sed -i "s/RELELIB=\"${RELELIB}\"/RELELIB=\"$((${RELELIB}+1))\"/g" $CONFIG_FILE
fi
read -p "Do you want to increment the tools version (${RELEEXM})? (y/N)" releexm
# BUG FIX: the lowercased answer was assigned to 'releplat' instead of
# 'releexm', so the test below saw the raw answer and upper-case
# "Y"/"YES" were silently ignored for the tools version.
releexm=${releexm,,} #var to lower case
if [[ $releexm =~ (yes|y)$ ]]; then
sed -i "s/RELEEXM=\"${RELEEXM}\"/RELEEXM=\"$((${RELEEXM}+1))\"/g" $CONFIG_FILE
fi
for arch in $ARCHILIST; do
# Rewrite the ARCH assignment in the config for the current arch.
# NOTE(review): this compares $ARCH (from the config), not the loop
# variable $arch -- kept as-is to preserve behavior; confirm intent.
if [[ $ARCH == $ARCH32 ]]; then
sed -i "s/ARCH=\$ARCH32/ARCH=$arch/g" $CONFIG_FILE
elif [[ $ARCH == $ARCH64 ]]; then
sed -i "s/ARCH=\$ARCH64/ARCH=$arch/g" $CONFIG_FILE
fi
. $CONFIG_FILE
echo -e "\e[92mRemoving old debs if any...\e[39m"
sudo dpkg --purge $NAMEEXM $NAMELIB $NAMEPLAT
echo -e "\e[92mRemoving previous auxiliar installation folder...\e[39m"
sudo rm -fr $ADTNAUX
echo -e "\e[92mGenerating adtn deb...\e[39m"
bash deb_adtn.sh
echo -e "\e[92mInstalling adtn...\e[39m"
sudo dpkg -i $RELEASE/v$VERSPLAT-$RELEPLAT/$DEB/${NAMEPLAT}_${VERSPLAT}-${RELEPLAT}_${ARCH}.deb
echo -e "\e[92mGenerating adtn lib...\e[39m"
bash deb_lib.sh
echo -e "\e[92mInstalling lib...\e[39m"
sudo dpkg -i $RELEASE/v$VERSLIB-$RELELIB/$DEB/${NAMELIB}_${VERSLIB}-${RELELIB}_${ARCH}.deb
echo -e "\e[92mGenerating tools...\e[39m"
bash deb_tools.sh
echo -e "\e[92mInstalling tools...\e[39m"
sudo dpkg -i $RELEASE/v$VERSEXM-$RELEEXM/$DEB/${NAMEEXM}_${VERSEXM}-${RELEEXM}_${ARCH}.deb
echo -e "\e[92m"
read -p "Do you want to remove the installed debs? (Y/n)" var
echo -e "\e[39m"
var=${var,,} #var to lower case
if [[ $var =~ (yes|y)$ ]] || [ -z $var ]; then
echo -e "\e[92mRemoving debs installed...\e[39m"
sudo dpkg --purge $NAMEEXM $NAMELIB $NAMEPLAT
fi
sudo rm -fr $CURRPWD/$ADTNAUX
# ARM builds are terminal: nothing further to iterate.
if [[ $ARCH == $ARCHARM ]]; then
break;
fi
ARCH=`eval echo -e "$arch"`
done;
echo -e "\e[92mEverything went as expected. Have a nice day =)~\e[39m"
exit 0;
|
SeNDA-UAB/aDTN-platform
|
make_debs/create_debs.sh
|
Shell
|
apache-2.0
| 2,436 |
# Rebuild the PDF_Thresher Groovy sources: wipe ../bin, compile all
# of ../src into it, then return to the src directory.
# Must be invoked from the src directory.
rm -rf ../bin
# Guard the directory changes: without the guards a failed mkdir/cd
# would leave groovyc compiling into the wrong directory.
mkdir ../bin || exit 1
cd ../bin || exit 1
groovyc ../src/*.groovy
cd ../src || exit 1
|
peidevs/OpenDataBookClub
|
tools/PDF_Thresher/src/build.sh
|
Shell
|
apache-2.0
| 74 |
#cd .
# Archive the user's tool directories and dot-config files (relative
# paths, so the tarball restores in place) into bin.tgz. The trailing
# commented-out ../.bash_profile was deliberately left out of the set.
tar -zcf bin.tgz ./bin ./sbin ./diary ./.bashrc ./.emacs # ../.bash_profile
|
bayvictor/distributed-polling-system
|
bin/bintar.sh
|
Shell
|
apache-2.0
| 83 |
#!/bin/bash
# Batch-submit GATK variant calling to Google Genomics Pipelines.
# For each "<bam_path> <sample_name>" row of the input TSV, instantiate
# the inputs template and launch one pipeline run.
#
# Usage: VariantCalling.batch_submit.sh <inputs.tsv>
# inputs file
INPUTS_TSV=$1
# Fail fast with a usage message instead of an "ambiguous redirect"
# error when the argument is missing.
if [ -z "$INPUTS_TSV" ] || [ ! -f "$INPUTS_TSV" ]; then
echo "Usage: $0 <inputs.tsv>" >&2
exit 1
fi
# NOTE(review): unused in this loop; kept in case other scripts source
# this file -- confirm before removing.
GATK_GOOGLE_DIR="dfci-cccb-pipeline-testing"
# -r keeps backslashes literal; `|| [ -n "$line" ]` also processes a
# final row that lacks a trailing newline (previously dropped).
while read -r line || [ -n "$line" ]; do
cp VariantCalling.cloud.inputs.template.json VariantCalling.cloud.inputs.json
# Column 1: BAM path; column 2: sample base name.
INPUT=$(echo "$line" | awk '{ print $1 }')
SAMPLENAME=$(echo "$line" | awk '{ print $2 }')
sed -i "s#BAM_INJECTION#${INPUT}#g" VariantCalling.cloud.inputs.json
sed -i "s#INPUT_BASENAME_INJECTION#${SAMPLENAME}#g" VariantCalling.cloud.inputs.json
gcloud alpha genomics pipelines run \
--pipeline-file wdl_pipeline.yaml \
--zones us-east1-b \
--logging gs://dfci-cccb-pipeline-testing/logging \
--inputs-from-file WDL=VariantCalling.cloud.wdl \
--inputs-from-file WORKFLOW_INPUTS=VariantCalling.cloud.inputs.json \
--inputs-from-file WORKFLOW_OPTIONS=VariantCalling.cloud.options.json \
--inputs WORKSPACE=gs://dfci-cccb-pipeline-testing/workspace \
--inputs OUTPUTS=gs://dfci-cccb-pipeline-testing/outputs
done < "$INPUTS_TSV"
|
dkdeconti/DFCI-CCCB-GATK-Cloud-pipeline
|
archive/GATK_Cloud_single_sample_pipeline/VariantCalling/VariantCalling.batch_submit.sh
|
Shell
|
bsd-2-clause
| 992 |
#!/bin/bash
# PHP extension bundle installer. (Deprecated -- see note below.)
# @farwish.com BSD-License
#
# Prefer installing the common extensions directly with pecl instead:
# Example:
#
#   /usr/local/php7.0.14/bin/pecl install redis-3.0.0
#   /usr/local/php7.0.14/bin/pecl install yar-2.0.1
#   /usr/local/php7.0.14/bin/pecl install swoole-1.9.1
#
# BUG FIX: the shebang was "#/bin/bash" (missing '!'), so the kernel
# ignored it and the script ran under whatever shell invoked it.
php_path=/usr/local/php5.6.25/
cur_dir=`pwd`
yum install -y git
# extension=eio.so
pecl install eio
# extension=libevent.so
yum install -y libevent-devel.x86_64
pecl install libevent-0.1.0
# extension=redis.so -- built from source at a pinned tag.
if [ ! -d ${cur_dir}/phpredis ]; then
git clone -b 2.2.8 https://github.com/phpredis/phpredis.git
fi
# Guard the cd: previously a failed clone meant phpize/configure/make
# ran in the wrong directory.
cd phpredis || exit 1
phpize
./configure --with-php-config=${php_path}bin/php-config
make && make install
# extension=swoole.so
pecl install swoole
echo -e "\nCompleted! 要在php.ini中加入的配置有:\n"
echo -e "extension=eio.so
extension=libevent.so
extension=redis.so
extension=swoole.so\n"
|
farwish/delicateShell
|
lnmp/installPhpExt.sh
|
Shell
|
bsd-2-clause
| 929 |
#!/bin/sh
## Unreal Engine 4 Mono setup script
## Copyright 1998-2015 Epic Games, Inc. All Rights Reserved.
## This script is expecting to exist in the UE4/Engine/Build/BatchFiles directory. It will not work correctly
## if you copy it to a different location and run it.
echo
echo Running Mono...
echo
# PORTABILITY FIX: `source` and `pushd` are bashisms and fail when
# /bin/sh is dash/ash. Use the POSIX `.` command and a plain `cd`
# (no popd was ever issued, so cd is equivalent; pushd's directory-
# stack echo to stdout is also dropped).
. "`dirname "$0"`/SetupMono.sh" "`dirname "$0"`"
# put ourselves into Engine directory (two up from location of this script)
cd "`dirname "$0"`/../../.." || exit 1
if [ ! -f Build/BatchFiles/Mac/RunMono.sh ]; then
echo RunMono ERROR: The batch file does not appear to be located in the /Engine/Build/BatchFiles directory. This script must be run from within that directory.
exit 1
fi
# Forward all arguments to mono and propagate its exit status.
mono "$@"
exit $?
|
PopCap/GameIdea
|
Engine/Build/BatchFiles/Mac/RunMono.sh
|
Shell
|
bsd-2-clause
| 714 |
# Nix setup hook: expose this package's bundled SDK to the compiler by
# prepending its framework (-F) and include (-I) search paths to
# NIX_CFLAGS_COMPILE. @out@ and @name@ are substituted by Nix when the
# hook is installed.
prependSearchPath() {
NIX_CFLAGS_COMPILE="-F@out@/Developer/SDKs/System/Library/Frameworks -I@out@/Developer/SDKs/@name@/usr/include ${NIX_CFLAGS_COMPILE}"
}
# Run the hook before the configure phase of dependent builds.
preConfigureHooks+=(prependSearchPath)
|
ajnsit/reflex-platform
|
setup-hook.sh
|
Shell
|
bsd-3-clause
| 200 |
#!/bin/sh
#
# $KAME: mip6seccontrol.sh,v 1.1 2004/12/09 02:18:43 t-momose Exp $
#
# Install/remove IPsec SA (add/delete) and policy (spdadd/spddelete)
# entries for Mobile IPv6 nodes via setkey(8). Per-node configuration
# files live under ${ipv6_mobile_config_dir}/<nodename>/; the -m/-g
# switch selects the mobile-node or home-agent policy file suffix.
cat=/bin/cat
setkey=${setkey_program:-/usr/local/v6/sbin/setkey}
show_usage() {
${cat} <<EOF
Usage: ${0} {-m|-g} commands node_dir
The default config directory is ${ipv6_mobile_config_dir}.
This value can be changed by modifing ipv6_mobile_config_dir
variable in /etc/rc.conf.
Commands:
installall
deinstallall
reinstallall
install nodename
deinstall nodename
reinstall nodename
add nodename
delete nodename
spdadd nodename
spddelete nodename
EOF
}
#
# source rc.conf
#
if [ -r /etc/defaults/rc.conf ]; then
. /etc/defaults/rc.conf
fi
if [ -r /etc/rc.conf ]; then
. /etc/rc.conf
fi
ipv6_mobile_config_dir=${ipv6_mobile_config_dir:-/usr/local/v6/etc/mobileip6}
if [ $# -lt 1 ]; then
show_usage
exit 1
fi
#
# check switch
#
case ${1} in
-m)
config_suffix=_mobile_node
;;
-g)
config_suffix=_home_agent
;;
*)
show_usage
exit 1
;;
esac
# argv is shifted
shift
#
# process commands which don't require argument
# (the usage text previously omitted reinstallall although it has
# always been implemented here -- fixed above)
#
case ${1} in
installall)
for node_dir in ${ipv6_mobile_config_dir}/*
do
if [ ! -e ${node_dir}/add ]; then
continue;
fi
${setkey} -f ${node_dir}/add
${setkey} -f ${node_dir}/spdadd${config_suffix}
done
;;
deinstallall)
for node_dir in ${ipv6_mobile_config_dir}/*
do
if [ ! -e ${node_dir}/add ]; then
continue;
fi
${setkey} -f ${node_dir}/delete
${setkey} -f ${node_dir}/spddelete${config_suffix}
done
;;
reinstallall)
for node_dir in ${ipv6_mobile_config_dir}/*
do
if [ ! -e ${node_dir}/add ]; then
continue;
fi
${setkey} -f ${node_dir}/delete
${setkey} -f ${node_dir}/spddelete${config_suffix}
${setkey} -f ${node_dir}/add
${setkey} -f ${node_dir}/spdadd${config_suffix}
done
esac
#
# these commands need no further processing
#
case ${1} in
installall|deinstallall|reinstallall)
exit 0
;;
esac
if [ $# -lt 2 ]; then
show_usage
exit 1
fi
#
# check node_dir
#
if [ ! -d ${ipv6_mobile_config_dir}/${2} ]; then
cat << EOF
No configuration directory for the node ${2}.
EOF
exit 1
fi
node_dir=${ipv6_mobile_config_dir}/${2}
#
# process commands
#
case ${1} in
install)
${setkey} -f ${node_dir}/add
${setkey} -f ${node_dir}/spdadd${config_suffix}
;;
deinstall)
${setkey} -f ${node_dir}/delete
${setkey} -f ${node_dir}/spddelete${config_suffix}
;;
reinstall)
${setkey} -f ${node_dir}/delete
${setkey} -f ${node_dir}/spddelete${config_suffix}
${setkey} -f ${node_dir}/add
${setkey} -f ${node_dir}/spdadd${config_suffix}
;;
add)
${setkey} -f ${node_dir}/add
;;
delete)
${setkey} -f ${node_dir}/delete
;;
spdadd)
${setkey} -f ${node_dir}/spdadd${config_suffix}
;;
spddelete)
${setkey} -f ${node_dir}/spddelete${config_suffix}
;;
*)
show_usage
exit 1
esac
exit 0
|
MarginC/kame
|
kame/kame/shisad/mip6seccontrol.sh
|
Shell
|
bsd-3-clause
| 2,751 |
#
# unzip armitage.tgz in this directory first.
#
# Build the macOS Armitage distribution: stage the .app bundle, inject
# the freshly built jar plus docs, then pack everything into armitage.dmg.
rm -rf dist
mkdir dist
cp -r Armitage.app dist
cp armitage/armitage.jar dist/Armitage.app/Contents/Java
cp armitage/*.txt dist/
cp *.rtf dist/
rm -rf armitage
mv dist Armitage
hdiutil create -ov -volname Armitage -srcfolder ./Armitage armitage.dmg
# NOTE(review): presumably relies on the case-insensitive macOS
# filesystem -- lowercase "armitage" here removes the "Armitage"
# staging folder created above; confirm on case-sensitive volumes.
rm -rf armitage
|
rsmudge/armitage
|
dist/mac/build.sh
|
Shell
|
bsd-3-clause
| 315 |
# could also use rdmd like this:
# rdmd --build-only -version=reggaelib -version=minimal -ofbin/reggae -Isrc -Ipayload -Jpayload/reggae reggaefile.d -b binary .
# This script doesn't just so it doesn't have to depend on rdmd being available
# Bootstrap: compile a minimal reggae binary directly with dmd, then use
# it to generate and run a build of the full project from the repo root.
dmd -version=minimal -ofbin/reggae -Isrc -Ipayload -Jpayload/reggae src/reggae/{reggae_main,options,reggae}.d payload/reggae/{types,build,config}.d payload/reggae/rules/{common,defaults}.d
cd bin
# Generate a standalone 'build' script for the parent directory (-b
# binary) and execute it.
./reggae -b binary ..
./build
|
yshui/reggae
|
minimal_bootstrap.sh
|
Shell
|
bsd-3-clause
| 469 |
# Export hyphenated words ('%-%') in the relevant next_version states
# from the OpenTaal words_list table to woorden-met-streepje.tsv and
# report the count. Requires both credential files one directory up.
#
# BUG FIX: the original test used `[ ! -e A -a ! -e B ]`, which only
# failed when BOTH files were missing; with exactly one missing it
# carried on and the mysql call below broke. Either file missing must
# abort. (`-a` inside test is also deprecated per POSIX.)
if [ ! -e ../.database-username ] || [ ! -e ../.database-password ]
then
echo ERROR: Missing ../.database-username and/or ../.database-password files
exit 1
fi
# NOTE(review): the password is passed on the command line and is
# visible in `ps` output -- consider a ~/.my.cnf instead.
mysql --silent opentaal -u `cat ../.database-username` -p`cat ../.database-password` -D opentaal -v -e "SELECT next_version,word FROM words_list WHERE word LIKE '%-%' AND (next_version = 'b' OR next_version = 'B' OR next_version = 'k' OR next_version = 'K' OR next_version = 'f' OR next_version = 'F') order by next_version,word"|tail -n +5>woorden-met-streepje.tsv
echo Aantal woorden met streepje: `cat woorden-met-streepje.tsv|wc -l`
|
OpenTaal/database-tools
|
woorden-met-streepje.sh
|
Shell
|
bsd-3-clause
| 595 |
#!/bin/sh
# Build/test driver for millstone: cleans fixtures left by a prior test
# run, then installs dependencies and runs the suite. With -c it only
# cleans.
print_usage () {
echo "Usage:"
echo "  $0     Clean up prior test, build, and test."
echo "  $0 -c  Clean up prior test only."
}
if [ "$1" != "" ] && [ "$1" != "-c" ]; then
print_usage; exit 1
fi
# Remove every file/directory the test suite downloads or unpacks.
clean () {
rm test/cache/layers/absolute-json.json
rm test/cache/layers/csv.csv
rm test/cache/layers/polygons.json
rm test/data/.ne_10m_admin_0_boundary_lines_disputed_areas.zip
rm test/data/ne_10m_admin_0_boundary_lines_disputed_areas.dbf
rm test/data/ne_10m_admin_0_boundary_lines_disputed_areas.html
rm test/data/ne_10m_admin_0_boundary_lines_disputed_areas.prj
rm test/data/ne_10m_admin_0_boundary_lines_disputed_areas.shp
rm test/data/ne_10m_admin_0_boundary_lines_disputed_areas.shx
rm test/data/ne_10m_admin_0_boundary_lines_disputed_areas.txt
rm -rf test/cache/layers/absolute-shp/
rm -rf test/cache/layers/stations/
rm -rf test/cache/layers/zip-no-ext/
rm -rf test/corrupt-zip/layers/
rm -rf test/macosx-zipped/layers/
rm -rf test/multi-shape-zip/layers/
rm -rf test/zipped-json/layers/
}
# PORTABILITY FIX: this file runs under #!/bin/sh, but `==` inside
# [ ] is a bashism; dash/ash reject it ("unexpected operator") and the
# -c branch was silently skipped. POSIX test uses `=`.
if [ "$1" = "-c" ]; then
clean; exit 0
fi
clear
npm install
npm test
clean
exit 0
|
mapbox/millstone
|
build.sh
|
Shell
|
bsd-3-clause
| 1,112 |
# Run-length encode each line of whitespace-separated integers read from
# the file named by $1. Every run of equal values is emitted as
# "<count> <value>"; runs are space-joined, one output line per input
# line. Example: "1 1 2 3 3 3" -> "2 1 1 2 3 3".
# NOTE(review): -1 serves as the "no previous value" sentinel, so the
# encoder assumes non-negative input values.

# Encode a single line (passed as one argument) and print the result.
compress_line() {
    local prev=-1
    local count=1
    local val
    for val in $1; do
        if [ "$val" -eq "$prev" ]; then
            ((count++))
        else
            # Flush the previous run (skipped for the sentinel).
            if [ "$prev" -ge 0 ]; then
                printf "%d %d " "$count" "$prev"
                count=1
            fi
            prev=$val
        fi
    done
    # Flush the final run and terminate the line.
    printf "%d %d\n" "$count" "$prev"
}

# Drive the encoder over the input file; `|| [ -n "$line" ]` also
# handles a final line without a trailing newline, and -r keeps
# backslashes literal. Guarding on $1 replaces the original's
# "ambiguous redirect" error when the argument was missing.
if [ -n "${1-}" ]; then
    while read -r line || [ -n "$line" ]; do
        compress_line "$line"
    done < "$1"
fi
|
nikai3d/ce-challenges
|
easy/compressed_sequence.bash
|
Shell
|
bsd-3-clause
| 323 |
#!/bin/bash
#=== Building and Installing Exodus Using zypper ===
#you can view/run the latest version of this script directly
# wget -O - -q http://exodusdb.googlecode.com/svn/trunk/buildinstall_zyp.sh | less
# wget -O - -q http://exodusdb.googlecode.com/svn/trunk/buildinstall_zyp.sh | bash
#or
# curl http://exodusdb.googlecode.com/svn/trunk/buildinstall_zyp.sh | less
# curl http://exodusdb.googlecode.com/svn/trunk/buildinstall_zyp.sh | bash
#it takes about 5 minutes
#tested on:
#openSUSE 11.4 "Celadon"
#Linux linux-ehmx 2.6.37.6-0.5-desktop #1 SMP PREEMPT 2011-04-25 21:48:33 +0200 x86_64 x86_64 x86_64 GNU/Linux
echo building on:
cat /etc/issue|head -n 1
uname -a
export yum="yum -y"
export yum="zypper --non-interactive"
#==== 1. Installing or Building Boost ====
sudo $yum install icu libicu libicu-devel
#rather dumb version centos/redhat 5 detection
if [ "`cat /etc/issue|grep " 5"`" == "" ]; then
#sudo $yum install boost-devel boost-date-time boost-filesystem boost-regex boost-system boost-thread
sudo $yum install boost-devel '*boost_date_time*' '*boost_filesystem*' '*boost_regex*' '*boost_system*' '*boost_thread*'
else
#IT IS CRITICAL THAT BOOST-DEVEL IS *NOT* INSTALLED if building boost as follows.
sudo $yum remove boost-devel
sudo $yum install gcc-c++
cd ~
rm -f boost_1_46_1.tar.gz
wget http://sourceforge.net/projects/boost/files/boost/1.46.1/boost_1_46_1.tar.gz/download
tar xfz boost_1_46_1.tar.gz
cd boost_1_46_1
#bjam should say "has_icu builds: yes" otherwise check your icu installation above
./bootstrap.sh
sudo ./bjam --with-date_time --with-filesystem --with-regex --with-system --with-thread install
fi
#==== 2. Building and Installing Exodus ====
sudo $yum install subversion gcc-c++ postgresql-devel make
cd ~
svn co HTTPS://exodusdb.googlecode.com/svn/trunk/ exodus
cd ~/exodus
./configure
make && sudo make install
#==== 3. Configuring Postgresql ====
sudo $yum install postgresql-server
test -f /usr/local/bin/installexodus-postgresql && sudo /usr/local/bin/installexodus-postgresql
test -f /usr/bin/installexodus-postgresql && sudo /usr/bin/installexodus-postgresql
#==== 4. Configuring Exodus ====
echo host=127.0.0.1 \
port=5432 \
dbname=exodus \
user=exodus \
password=somesillysecret \
> ~/.exodus
#==== 5. Testing Exodus ====
cd ~
testsort
#==== 6. Programming with Exodus ====
# BUG FIX: the section header above was missing its leading '#', so the
# script tried to execute a command literally named "====" and always
# reported "command not found" at the end.
#you must make some change to hello or actually save it, not just quit
#edic hello
#hello
#compile hello
#find /usr|grep exodus.*examples
|
rdmenezes/exodusdb
|
buildinstall_zyp.sh
|
Shell
|
mit
| 2,528 |
# Compile the Papilio Loader GUI entry point (plus everything it pulls
# in via -sourcepath) into the loader package directory, then pack it
# uncompressed (0) with the manifest into papilio-loader.jar.
javac -sourcepath src -d src/net/gadgetfactory/papilio/loader src/net/gadgetfactory/papilio/loader/PapilioLoader.java
jar cfm0 papilio-loader.jar PapilioLoader.mf -C src/net/gadgetfactory/papilio/loader/ .
#java -jar papilio-loader.jar
|
chcbaram/FPGA
|
zap-2.3.0-windows/papilio-zap-ide/tools/Papilio_Loader/build.sh
|
Shell
|
mit
| 236 |
#!/bin/bash
# Script Name: AtoMiC Ubooquity Reverse Proxy Enable.
# Rewrites the reverseProxyPrefix line of Ubooquity's JSON settings
# file in place (sed c\ replaces the whole matching line) so the app
# serves under the /ubooquity reverse-proxy prefix.
# NOTE(review): $APPSETTINGS is expected to be exported by the AtoMiC
# toolkit before this helper runs -- confirm it is always set.
if sed -i "/reverseProxyPrefix/c\\ \"reverseProxyPrefix\" : \"ubooquity\"," "$APPSETTINGS"; then
echo "Updated reverseProxyPrefix in $APPSETTINGS"
fi
|
TommyE123/AtoMiC-ToolKit
|
ubooquity/ubooquity-reverse-proxy-enable.sh
|
Shell
|
mit
| 222 |
# Print a three-field label (Name, ID, TimeStamp) on the DYMO
# LabelWriter 450 Turbo via lpr, one field per paragraph.
if [ "$#" -ne 3 ]; then
    echo "Illegal number of parameters";
    echo "Name ID TimeStamp";
    # NOTE(review): exits 0 even on bad usage -- kept for callers that
    # ignore the status, but consider exit 1.
    exit 0;
fi;
# BUG FIX: the original built STRING=$1'\n\n'$2'\n\n'$3 and piped it
# through plain `echo`, which does not interpret \n without -e, so the
# printer received the literal characters "\n\n". printf emits real
# blank lines between the fields. "protrait" was also a typo for the
# lpr "portrait" page-orientation option.
printf '%s\n\n%s\n\n%s\n' "$1" "$2" "$3" | lpr -P LabelWriter-450-Turbo -o portrait -o PageSize=w57h32 -o page-left=25 -o page-right=25 -o page-top=25 -o page-bottom=25
|
henryYHC/STS-Integrated-System
|
app/scripts/printLabel.sh
|
Shell
|
mit
| 279 |
# Looping and Skipping
# Display only the odd natural numbers from 1 to 99, one per line.
#!/bin/bash
# C-style counting loop instead of a stepped brace expansion
# ({1..100..2}): start at 1 and advance by 2, visiting exactly
# 1, 3, ..., 99.
for (( n = 1; n <= 99; n += 2 )); do
    printf '%d\n' "$n"
done
|
anishacharya/Shell-Scripting
|
looping_fundamentals.sh
|
Shell
|
mit
| 172 |
#!/bin/bash
###
### Wrapper script to start/stop SMQ Server and Worker daemons
###
### @author Knut Kohl <[email protected]>
### @copyright 2012-2015 Knut Kohl
### @license MIT License (MIT) http://opensource.org/licenses/MIT
### @version 1.0.0
###
#set -x
# Directory this script lives in (symlinks resolved).
pwd=$(dirname $(readlink -f $0))
#pwd=$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)
conf=$pwd/QueueServer.conf
# BUG FIX: the original used `|| ( echo ...; exit 127 )` -- the exit
# happened inside a subshell, so the script carried on and crashed when
# sourcing the missing file below. Braces run in the current shell, so
# the exit actually aborts. Diagnostics now also go to stderr.
[ -f "$conf" ] || { echo 'Missing configuration file!' >&2; exit 127; }
# Select which daemon to run.
case $1 in
Server)
prog=Server
;;
Worker)
prog=Worker
;;
*)
echo "Usage: $0 (Server|Worker)"
exit 1
esac
. $conf
### Defaults
[ "$PORT" ] || PORT=7777
[ "$MEMCACHE" ] || MEMCACHE=localhost:11211
# Run the selected daemon in the foreground with verbose output
# (this is the debug variant of the wrapper).
php $pwd/$prog.php -vvm $MEMCACHE -p $PORT
|
pafei/PVLng
|
tools/QueueServer/QueueServer-dbg.sh
|
Shell
|
mit
| 749 |
#!/usr/bin/env bash
# Initialize Go modules and smoke-run every example under ../../_examples.
for f in ../../_examples/*; do
    if [ -d "$f" ]; then
        # Will not run if no directories are available
        # BUGFIX: the go commands previously all ran in the script's own
        # working directory (never entering $f), so the second iteration's
        # `go mod init` failed with "go.mod already exists" and `go run .`
        # never executed the example. Run them inside each example
        # directory, in a subshell so the cwd is restored per iteration.
        (
            cd "$f" || exit 1
            go mod init
            go get -u github.com/kataras/iris/v12@master
            go mod download
            go run .
        )
    fi
done
# git update-index --chmod=+x ./.github/scripts/setup_examples_test.bash
|
kataras/gapi
|
.github/scripts/setup_examples_test.bash
|
Shell
|
mit
| 331 |
#! /bin/bash
# Interactive SD-card formatter for the LMS2012 (EV3) image: detects the
# card's device node by diffing /dev/sd? before and after insertion, then
# partitions it (via fdisk.cmd) into a FAT kernel partition (LMS2012) and
# an ext3 filesystem partition (LMS2012_EXT). Errors collect in sdcard.err.
# WARNING: destructive — wipes the chosen drive.
source ${PWD}/env_setup
curdir=${PWD}
# NOTE(review): bare `sudo` with no command only prints sudo's usage text;
# presumably `sudo -v` (pre-cache credentials) was intended — confirm.
sudo
clear
echo
echo "*** FORMAT SDCARD ********************************** TCP120706 ***"
echo
# First listing: card removed, so the added name can be spotted later.
echo BE SURE SDCARD IS REMOVED
echo -n and press enter or Ctrl-C to skip" ? "
read
clear
echo
echo "*** FORMAT SDCARD ********************************** TCP120706 ***"
echo
sleep 2
cd /dev
ls sd?
cd ${curdir}
echo
echo
# Second listing: card inserted; the new sd? entry is the target drive.
echo CHECK LIST - INSERT SDCARD
echo -n and press enter or Ctrl-C to skip" ? "
read
clear
echo
echo "*** FORMAT SDCARD ********************************** TCP120706 ***"
echo
sleep 2
cd /dev
ls sd?
cd ${curdir}
echo
echo
# Ask the operator which device appeared (e.g. "sdb").
echo CHECK LIST CHANGES - Type the added partition name \(e.c. sdb\)
echo -n and press enter or Ctrl-C to skip" ? "
read drive
if [ -n "$drive" ]
then
    # Refuse "sda" — almost certainly the system disk.
    if [ "$drive" != "sda" ]
    then
        # Loop forever so multiple cards can be formatted in one session;
        # the operator leaves with Ctrl-C at any prompt.
        while true
        do
            clear
            echo
            echo "*** FORMAT SDCARD ********************************** TCP120706 ***"
            echo
            echo
            echo "!!! ALL DATA ON DRIVE WILL BE LOST !!!"
            echo
            echo FORMAT "\""${drive}"\""
            echo -n press enter or Ctrl-C to skip" ? "
            read
            echo
            echo " ...."formatting.sdcard
            sleep 5
            # Unmount any auto-mounted partitions, then repartition using
            # the canned fdisk command script. All tool output → sdcard.err.
            sudo umount /dev/${drive}1 &> /dev/null
            sudo umount /dev/${drive}2 &> /dev/null
            sudo fdisk /dev/${drive} < fdisk.cmd &>> sdcard.err
            sleep 2
            echo
            echo " ...."making.kernel.partition
            sudo mkfs.msdos -n LMS2012 /dev/${drive}1 &>> sdcard.err
            sleep 2
            echo
            echo " ...."making.filesystem.partition
            sudo mkfs.ext3 -L LMS2012_EXT /dev/${drive}2 &>> sdcard.err
            echo
            echo " ...."checking.partitions
            sync
            # Success check: both partition device nodes must now exist;
            # otherwise dump the collected error log.
            if [ -e /dev/${drive}1 ]
            then
                if [ -e /dev/${drive}2 ]
                then
                    echo
                    echo SUCCESS
                else
                    echo
                    echo "******************************************************************"
                    cat sdcard.err
                    echo "******************************************************************"
                    echo
                    echo SDCARD NOT FORMATTED PROPERLY !!!
                fi
            else
                echo
                echo "******************************************************************"
                cat sdcard.err
                echo "******************************************************************"
                echo
                echo SDCARD NOT FORMATTED PROPERLY !!!
            fi
            echo
            echo REMOVE sdcard
            echo
            echo "******************************************************************"
            echo
            echo
            # Offer to format another card with the same device name.
            echo FORMAT ANOTHER ONE
            echo -n press enter or Ctrl-C to skip" ? "
            read
            echo
            echo
            echo INSERT SDCARD
            echo -n and press enter or Ctrl-C to skip" ? "
            read
            sleep 5
        done
    else
        echo
        echo YOU MUST NOT SPECIFY "sda" !!!
        echo
        echo "******************************************************************"
        echo
    fi
else
    echo
    echo YOU MUST SPECIFY A DRIVE !!!
    echo
    echo "******************************************************************"
    echo
fi
|
Larsjep/monoev3image
|
format_sdcard.sh
|
Shell
|
mit
| 2,639 |
#!/bin/bash
# Process both resource XML descriptions into data/ext with PVRTC
# texture compression (same tool, same options, one file after the other).
for xml_file in xmls/res.xml demo/res_ui.xml; do
    python ../../tools/process_xml2.py -x "$xml_file" --src_data data --dest_data data/ext --compress pvrtc
done
|
unitpoint/oxygine-objectscript
|
examples/DemoOS/prepare_res_pvrtc.sh
|
Shell
|
mit
| 225 |
# Run the performance suite in every server-side JS runtime we know about,
# then hand the browser-based suite to the default opener.
cd "$(dirname "$0")"

for runtime in node narwhal ringo rhino; do
  printf '\nRunning performance suite in %s...\n' "$runtime"
  "$runtime" perf.js
done

printf '\nRunning performance suite in a browser...\n'
open index.html
|
Ninir/lodash
|
perf/run-perf.sh
|
Shell
|
mit
| 206 |
#!/bin/bash
# Deployment settings come from ./config as three comma-separated fields:
#   <file or directory>, <user@host>, <destination path>
file_or_directory=$(awk -F", " '{print $1}' config)
ssh_data=$(awk -F", " '{print $2}' config)
destination=$(awk -F", " '{print $3}' config)

# Push the configured file/directory to the remote host via rsync-over-ssh.
# Complains instead when any of the three settings is missing.
deployit () {
    if [[ $file_or_directory && $ssh_data && $destination ]]; then
        # rsync -avz -e 'ssh -p 2222' $file_or_directory $ssh_data:$destination
        rsync -avz -e ssh $file_or_directory $ssh_data:$destination
    else
        echo -e "Missing parameter (s)."
    fi
}

# Thin entry point, kept to mirror the original call structure.
initializer() {
    deployit
}

initializer
|
marcker/scaffold
|
static/deploy/deployit.sh
|
Shell
|
mit
| 471 |
# Less Colors for Man Pages
# less renders man pages through termcap capabilities; overriding the
# start/end escape sequences below colorizes bold, standout and underline
# text (ANSI SGR codes; \E[0m resets attributes).
export LESS_TERMCAP_mb=$'\E[01;31m'       # begin blinking
export LESS_TERMCAP_md=$'\E[01;38;5;74m'  # begin bold
export LESS_TERMCAP_me=$'\E[0m'           # end mode
export LESS_TERMCAP_se=$'\E[0m'           # end standout-mode
export LESS_TERMCAP_so=$'\E[38;5;246m'    # begin standout-mode - info box
export LESS_TERMCAP_ue=$'\E[0m'           # end underline
export LESS_TERMCAP_us=$'\E[04;38;5;146m' # begin underline
|
thommahoney/pairbear
|
system/less.zsh
|
Shell
|
mit
| 450 |
#!/bin/sh
# success
# Derive this test's name from the script filename (strip the .sh suffix)
# and pass it to the shared driver along with the flag under test.
./simple-test.sh "$(basename "$0" .sh)" test1 --help
|
mc-server/TCLAP
|
tests/test40.sh
|
Shell
|
mit
| 69 |
# The output of all these installation steps is noisy. With this utility
# the progress report is nice and concise.
# $1 is a human-readable label; the remaining args are apt package names.
function install {
    echo installing $1
    shift
    apt-get -y install "$@" >/dev/null 2>&1
}

# upgrade system
apt-get update && sudo apt-get -y upgrade

install 'development tools' make build-essential libssl-dev libreadline6-dev zlib1g-dev libyaml-dev libc6-dev libcurl4-openssl-dev libksba8 libksba-dev libqtwebkit-dev
install 'Headless requirements' xvfb
install Git git git-core
install SQLite sqlite3 libsqlite3-dev
install memcached memcached
install Redis redis-server
install RabbitMQ rabbitmq-server

# install rbenv and ruby-build
# NOTE(review): with `sudo -u vagrant echo ... >> file` the echo runs as
# vagrant but the >> redirection is performed by this (root) shell — the
# appended files may end up root-owned; confirm this is intended.
sudo -u vagrant git clone git://github.com/sstephenson/rbenv.git /home/vagrant/.rbenv
sudo -u vagrant echo 'export PATH="$HOME/.rbenv/bin:$PATH"' >> /home/vagrant/.profile
sudo -u vagrant echo 'eval "$(rbenv init -)"' >> /home/vagrant/.profile
sudo -u vagrant git clone git://github.com/sstephenson/ruby-build.git /home/vagrant/.rbenv/plugins/ruby-build

# no rdoc for installed gems
sudo -u vagrant echo 'gem: --no-ri --no-rdoc' >> /home/vagrant/.gemrc

# install required ruby versions
sudo -u vagrant -i rbenv install 2.2.3
sudo -u vagrant -i rbenv global 2.2.3
sudo -u vagrant -i ruby -v
sudo -u vagrant -i gem install bundler --no-ri --no-rdoc
sudo -u vagrant -i rbenv rehash

echo installing Bundler
gem install bundler -N >/dev/null 2>&1

# PostgreSQL: superuser + the two databases Active Record's test suite expects.
install PostgreSQL postgresql postgresql-contrib libpq-dev
sudo -u postgres createuser --superuser vagrant
sudo -u postgres createdb -O vagrant activerecord_unittest
sudo -u postgres createdb -O vagrant activerecord_unittest2

# Pre-seed the MySQL root password so the install is non-interactive.
debconf-set-selections <<< 'mysql-server mysql-server/root_password password root'
debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password root'
install MySQL mysql-server libmysqlclient-dev
mysql -uroot -proot <<SQL
CREATE USER 'rails'@'localhost';
CREATE DATABASE activerecord_unittest  DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
CREATE DATABASE activerecord_unittest2 DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
GRANT ALL PRIVILEGES ON activerecord_unittest.* to 'rails'@'localhost';
GRANT ALL PRIVILEGES ON activerecord_unittest2.* to 'rails'@'localhost';
GRANT ALL PRIVILEGES ON inexistent_activerecord_unittest.* to 'rails'@'localhost';
SQL

install 'Nokogiri dependencies' libxml2 libxml2-dev libxslt1-dev libxslt-dev libqt4-dev imagemagick
install 'Capybara dependencies' libqt4-dev
install 'ExecJS runtime' nodejs
install 'Other' imagemagick

# Needed for docs generation.
update-locale LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8
echo 'all set, rock on!'
|
jgherder/rails-dev-box
|
bootstrap.sh
|
Shell
|
mit
| 2,676 |
#
# Copyright (C) 2011 OpenWrt.org
#
# Shared sysupgrade helpers for ar71xx boards: image validation and
# platform-specific flashing routines, sourced by /lib/upgrade.

. /lib/functions/system.sh
. /lib/ar71xx.sh

# Default mtd partition that sysupgrade writes to.
PART_NAME=firmware
# Files and binaries copied into the upgrade ramdisk.
RAMFS_COPY_DATA=/lib/ar71xx.sh
RAMFS_COPY_BIN='nandwrite'
# Combined-image block size (bytes) and kernel load address used by mtd -F.
CI_BLKSZ=65536
CI_LDADR=0x80060000
# When set to 1, platform_do_upgrade_combined writes kernel and rootfs to
# separate mtd partitions instead of one streamed write; reset afterwards.
PLATFORM_DO_UPGRADE_COMBINED_SEPARATE_MTD=0
# Scan /proc/mtd for the first two kernel/rootfs-style partitions and print
# "<erasesize>:<first-name>:<second-name>" (erasesize comes from the second
# matching line). Prints nothing when fewer than two candidates are found.
platform_find_partitions() {
	local first dev size erasesize name
	while read dev size erasesize name; do
		# mtd names are quoted in /proc/mtd; strip the surrounding quotes.
		name=${name#'"'}; name=${name%'"'}
		case "$name" in
		vmlinux.bin.l7|vmlinux|kernel|linux|linux.bin|rootfs|filesystem)
			if [ -z "$first" ]; then
				first="$name"
			else
				echo "$erasesize:$first:$name"
				break
			fi
			;;
		esac
	done < /proc/mtd
}
# Given "partA:partB" (from platform_find_partitions), print whichever of
# the two names is a known kernel partition name; print nothing otherwise.
platform_find_kernelpart() {
	local candidate
	for candidate in "${1%:*}" "${1#*:}"; do
		case "$candidate" in
			vmlinux.bin.l7|vmlinux|kernel|linux|linux.bin)
				echo "$candidate"
				return 0
				;;
		esac
	done
}
# Given "partA:partB" in $1 and the kernel partition name in $2, print the
# first of the two names that is not the kernel partition.
platform_find_rootfspart() {
	local candidate
	for candidate in "${1%:*}" "${1#*:}"; do
		[ "$candidate" != "$2" ] && echo "$candidate" && break
	done
}
# Flash a "combined" sysupgrade image: one 64KiB header block followed by
# kernel and rootfs payloads. Kernel/rootfs lengths are stored in the header
# as ASCII hex (8 chars at byte offsets 2 and 10); everything is
# sanity-checked before mtd is allowed to write.
platform_do_upgrade_combined() {
	local partitions=$(platform_find_partitions)
	local kernelpart=$(platform_find_kernelpart "${partitions#*:}")
	local erase_size=$((0x${partitions%%:*})); partitions="${partitions#*:}"
	# bs=2 skip=1 count=4 -> bytes 2..9 of the header (kernel length).
	local kern_length=0x$(dd if="$1" bs=2 skip=1 count=4 2>/dev/null)
	local kern_blocks=$(($kern_length / $CI_BLKSZ))
	# bs=2 skip=5 count=4 -> bytes 10..17 (rootfs length), converted to blocks.
	local root_blocks=$((0x$(dd if="$1" bs=2 skip=5 count=4 2>/dev/null) / $CI_BLKSZ))

	if [ -n "$partitions" ] && [ -n "$kernelpart" ] && \
	   [ ${kern_blocks:-0} -gt 0 ] && \
	   [ ${root_blocks:-0} -gt 0 ] && \
	   [ ${erase_size:-0} -gt 0 ];
	then
		local rootfspart=$(platform_find_rootfspart "$partitions" "$kernelpart")
		# Preserve the saved configuration tarball across the flash.
		local append=""
		[ -f "$CONF_TAR" -a "$SAVE_CONFIG" -eq 1 ] && append="-j $CONF_TAR"

		if [ "$PLATFORM_DO_UPGRADE_COMBINED_SEPARATE_MTD" -ne 1 ]; then
			# Stream kernel then rootfs into one mtd write spanning both
			# partitions (-F relocates the kernel to CI_LDADR).
			( dd if="$1" bs=$CI_BLKSZ skip=1 count=$kern_blocks 2>/dev/null; \
			  dd if="$1" bs=$CI_BLKSZ skip=$((1+$kern_blocks)) count=$root_blocks 2>/dev/null ) | \
				mtd -r $append -F$kernelpart:$kern_length:$CI_LDADR,rootfs write - $partitions
		elif [ -n "$rootfspart" ]; then
			# Separate-partition mode: write kernel first, then rootfs
			# (only the rootfs write appends config and reboots, -r).
			dd if="$1" bs=$CI_BLKSZ skip=1 count=$kern_blocks 2>/dev/null | \
				mtd write - $kernelpart
			dd if="$1" bs=$CI_BLKSZ skip=$((1+$kern_blocks)) count=$root_blocks 2>/dev/null | \
				mtd -r $append write - $rootfspart
		fi
	fi
	PLATFORM_DO_UPGRADE_COMBINED_SEPARATE_MTD=0
}
# Print the 4-byte hardware ID from a TP-Link image header (byte offset 64)
# as 8 lowercase hex digits.
tplink_get_image_hwid() {
	get_image "$@" | dd bs=4 count=1 skip=16 2>/dev/null | hexdump -v -n 4 -e '1/1 "%02x"'
}
# Print the 4-byte model/mid field from a TP-Link image header (byte
# offset 68) as 8 lowercase hex digits.
tplink_get_image_mid() {
	get_image "$@" | dd bs=4 count=1 skip=17 2>/dev/null | hexdump -v -n 4 -e '1/1 "%02x"'
}
# Print the 4-byte bootloader-size field from a TP-Link image header (byte
# offset 148) as 8 hex digits; non-zero means the image bundles a bootloader.
tplink_get_image_boot_size() {
	get_image "$@" | dd bs=4 count=1 skip=37 2>/dev/null | hexdump -v -n 4 -e '1/1 "%02x"'
}
# Search a 1KiB window of image $1 starting at byte offset $2 for a line
# that exactly equals the model string $3. Returns 0 when found, 1 otherwise
# (the subshell's exit status is the function's return value).
tplink_pharos_check_support_list() {
	local image="$1"
	local offset="$2"
	local model="$3"

	# Here $image is given to dd directly instead of using get_image;
	# otherwise the skip will take almost a second (as dd can't seek)
	dd if="$image" bs=1 skip=$offset count=1024 2>/dev/null | (
		while IFS= read -r line; do
			[ "$line" = "$model" ] && exit 0
		done

		exit 1
	)
}
# Validate a TP-Link Pharos (CPE/WBS series) image: it must be an ELF file
# and its embedded support-list must contain this device's model string.
tplink_pharos_check_image() {
	local magic_long="$(get_magic_long "$1")"
	# 7f454c46 is the ELF file magic.
	[ "$magic_long" != "7f454c46" ] && {
		echo "Invalid image magic '$magic_long'"
		return 1
	}

	local model_string="$(tplink_pharos_get_model_string)"

	# New images have the support list at 7802888, old ones at 1511432
	tplink_pharos_check_support_list "$1" 7802888 "$model_string" || \
	tplink_pharos_check_support_list "$1" 1511432 "$model_string" || {
		echo "Unsupported image (model not in support-list)"
		return 1
	}

	return 0
}
# Print 4 bytes at offset 53 of a Seama image (the image-type field) as hex.
seama_get_type_magic() {
	get_image "$@" | dd bs=1 count=4 skip=53 2>/dev/null | hexdump -v -n 4 -e '1/1 "%02x"'
}
# Print 4 bytes at offset 32 of a WRGG image (its magic field) as hex.
wrgg_get_image_magic() {
	get_image "$@" | dd bs=4 count=1 skip=8 2>/dev/null | hexdump -v -n 4 -e '1/1 "%02x"'
}
# Print the first 8 bytes of a CyberTAN image (its magic) as hex.
cybertan_get_image_magic() {
	get_image "$@" | dd bs=8 count=1 skip=0 2>/dev/null | hexdump -v -n 8 -e '1/1 "%02x"'
}
# Compare the image's CyberTAN magic against this board's hardware magic;
# reject (return 1 with a message) on mismatch.
cybertan_check_image() {
	local magic="$(cybertan_get_image_magic "$1")"
	local fw_magic="$(cybertan_get_hw_magic)"

	[ "$fw_magic" != "$magic" ] && {
		echo "Invalid image, ID mismatch, got:$magic, but need:$fw_magic"
		return 1
	}

	return 0
}
# Flash a Compex (wp543/wpe72) image: the payload starts one 64KiB block
# into the file and its length is stored as ASCII hex at byte offset 2.
platform_do_upgrade_compex() {
	local fw_file=$1
	local fw_part=$PART_NAME
	local fw_mtd=$(find_mtd_part $fw_part)
	# bs=2 skip=1 count=4 -> bytes 2..9 of the header (payload length).
	local fw_length=0x$(dd if="$fw_file" bs=2 skip=1 count=4 2>/dev/null)
	local fw_blocks=$(($fw_length / 65536))

	if [ -n "$fw_mtd" ] && [ ${fw_blocks:-0} -gt 0 ]; then
		# Optionally append the saved configuration tarball.
		local append=""
		[ -f "$CONF_TAR" -a "$SAVE_CONFIG" -eq 1 ] && append="-j $CONF_TAR"

		sync
		# Skip the header block, then stream the payload into the partition.
		dd if="$fw_file" bs=64k skip=1 count=$fw_blocks 2>/dev/null | \
			mtd $append write - "$fw_part"
	fi
}
# ALFA tube2h boards ship with two flash layouts; accept a uImage
# (magic 27051956) only on the 16MiB firmware partition and a squashfs
# image (magic 68737173, "hsqs") only on the 8MiB one.
alfa_check_image() {
	local magic_long="$(get_magic_long "$1")"
	local fw_part_size=$(mtd_get_part_size firmware)

	case "$magic_long" in
	"27051956")
		[ "$fw_part_size" != "16318464" ] && {
			echo "Invalid image magic \"$magic_long\" for $fw_part_size bytes"
			return 1
		}
		;;

	"68737173")
		[ "$fw_part_size" != "7929856" ] && {
			echo "Invalid image magic \"$magic_long\" for $fw_part_size bytes"
			return 1
		}
		;;
	esac

	return 0
}
# Validate the sysupgrade image "$1" for the current board. Dispatches on
# board_name() to the image-format check each hardware family expects and
# returns 0 when the image may be flashed, 1 (with a message) otherwise.
platform_check_image() {
	local board=$(board_name)
	local magic="$(get_magic_word "$1")"
	local magic_long="$(get_magic_long "$1")"

	# Exactly one image argument is expected.
	[ "$#" -gt 1 ] && return 1

	case "$board" in
	# Boards using a plain uImage (magic word 2705).
	airgateway|\
	airgatewaypro|\
	airrouter|\
	ap121f|\
	ap132|\
	ap531b0|\
	ap90q|\
	archer-c25-v1|\
	archer-c58-v1|\
	archer-c59-v1|\
	archer-c60-v1|\
	archer-c60-v2|\
	archer-c7-v4|\
	bullet-m|\
	c-55|\
	carambola2|\
	cf-e316n-v2|\
	cf-e320n-v2|\
	cf-e355ac-v1|\
	cf-e355ac-v2|\
	cf-e375ac|\
	cf-e380ac-v1|\
	cf-e380ac-v2|\
	cf-e385ac|\
	cf-e520n|\
	cf-e530n|\
	cpe505n|\
	cpe830|\
	cpe870|\
	dap-1330-a1|\
	dgl-5500-a1|\
	dhp-1565-a1|\
	dir-505-a1|\
	dir-600-a1|\
	dir-615-c1|\
	dir-615-e1|\
	dir-615-e4|\
	dir-615-i1|\
	dir-825-c1|\
	dir-835-a1|\
	dlan-hotspot|\
	dlan-pro-1200-ac|\
	dlan-pro-500-wp|\
	dr342|\
	dr531|\
	dragino2|\
	e1700ac-v2|\
	e600g-v2|\
	e600gac-v2|\
	ebr-2310-c1|\
	ens202ext|\
	epg5000|\
	esr1750|\
	esr900|\
	ew-balin|\
	ew-dorin|\
	ew-dorin-router|\
	gl-ar150|\
	gl-ar300m|\
	gl-ar300|\
	gl-ar750|\
	gl-domino|\
	gl-mifi|\
	gl-usb150|\
	hiwifi-hc6361|\
	hornet-ub-x2|\
	jwap230|\
	lbe-m5|\
	lima|\
	loco-m-xw|\
	mzk-w04nu|\
	mzk-w300nh|\
	n5q|\
	nanostation-m|\
	nanostation-m-xw|\
	nbg460n_550n_550nh|\
	pqi-air-pen|\
	r36a|\
	r602n|\
	rme-eg200|\
	rocket-m|\
	rocket-m-ti|\
	rocket-m-xw|\
	rw2458n|\
	sc1750|\
	sc300m|\
	sc450|\
	sr3200|\
	t830|\
	tew-632brp|\
	tew-712br|\
	tew-732br|\
	tew-823dru|\
	tl-wr1043n-v5|\
	tl-wr942n-v1|\
	unifi|\
	unifi-outdoor|\
	unifiac-lite|\
	unifiac-pro|\
	wam250|\
	weio|\
	whr-g301n|\
	whr-hp-g300n|\
	whr-hp-gn|\
	wlae-ag300n|\
	wndap360|\
	wpj342|\
	wpj344|\
	wpj531|\
	wpj558|\
	wpj563|\
	wrt400n|\
	wrtnode2q|\
	wzr-450hp2|\
	wzr-hp-ag300h|\
	wzr-hp-g300nh|\
	wzr-hp-g300nh2|\
	wzr-hp-g450h|\
	xd3200)
		[ "$magic" != "2705" ] && {
			echo "Invalid image type."
			return 1
		}

		return 0
		;;

	# Boards accepting squashfs ("hsqs") or LZMA-loader (19852003) images.
	alfa-ap96|\
	alfa-nx|\
	ap121|\
	ap121-mini|\
	ap135-020|\
	ap136-010|\
	ap136-020|\
	ap147-010|\
	ap152|\
	ap91-5g|\
	ap96|\
	arduino-yun|\
	bhr-4grv2|\
	bxu2000n-2-a1|\
	db120|\
	dr344|\
	dw33d|\
	f9k1115v2|\
	hornet-ub|\
	mr12|\
	mr16|\
	zbt-we1526|\
	zcn-1523h-2|\
	zcn-1523h-5)
		[ "$magic_long" != "68737173" -a "$magic_long" != "19852003" ] && {
			echo "Invalid image type."
			return 1
		}

		return 0
		;;

	# AllNet-style images carry their own checksum/format check.
	all0258n|\
	all0315n|\
	cap324|\
	cap4200ag|\
	cr3000|\
	cr5000)
		platform_check_image_allnet "$1" && return 0
		return 1
		;;

	# Combined-image boards: magic "CI" (4349) plus an MD5 over the payload.
	all0305|\
	eap300v2|\
	eap7660d|\
	ja76pf|\
	ja76pf2|\
	jwap003|\
	ls-sr71|\
	pb42|\
	pb44|\
	routerstation|\
	routerstation-pro|\
	wp543|\
	wpe72)
		[ "$magic" != "4349" ] && {
			echo "Invalid image. Use *-sysupgrade.bin files on this board"
			return 1
		}

		# Stored MD5 lives at bytes 18..49; recompute over the payload
		# (everything after the first 64KiB header block) and compare.
		local md5_img=$(dd if="$1" bs=2 skip=9 count=16 2>/dev/null)
		local md5_chk=$(dd if="$1" bs=$CI_BLKSZ skip=1 2>/dev/null | md5sum -); md5_chk="${md5_chk%% *}"

		if [ -n "$md5_img" -a -n "$md5_chk" ] && [ "$md5_img" = "$md5_chk" ]; then
			return 0
		else
			echo "Invalid image. Contents do not match checksum (image:$md5_img calculated:$md5_chk)"
			return 1
		fi
		return 0
		;;

	# TP-Link header images: check version magic, hardware/model IDs and
	# refuse images that bundle a bootloader.
	antminer-s1|\
	antminer-s3|\
	antrouter-r1|\
	archer-c5|\
	archer-c7|\
	el-m150|\
	el-mini|\
	gl-inet|\
	lan-turtle|\
	mc-mac1200r|\
	minibox-v1|\
	omy-g1|\
	omy-x1|\
	onion-omega|\
	oolite-v1|\
	oolite-v5.2|\
	oolite-v5.2-dev|\
	packet-squirrel|\
	re355|\
	re450|\
	rut900|\
	smart-300|\
	som9331|\
	tellstick-znet-lite|\
	tl-mr10u|\
	tl-mr11u|\
	tl-mr12u|\
	tl-mr13u|\
	tl-mr3020|\
	tl-mr3040|\
	tl-mr3040-v2|\
	tl-mr3220|\
	tl-mr3220-v2|\
	tl-mr3420|\
	tl-mr3420-v2|\
	tl-mr6400|\
	tl-wa701nd-v2|\
	tl-wa7210n-v2|\
	tl-wa750re|\
	tl-wa7510n|\
	tl-wa801nd-v2|\
	tl-wa801nd-v3|\
	tl-wa830re-v2|\
	tl-wa850re|\
	tl-wa850re-v2|\
	tl-wa855re-v1|\
	tl-wa860re|\
	tl-wa901nd|\
	tl-wa901nd-v2|\
	tl-wa901nd-v3|\
	tl-wa901nd-v4|\
	tl-wa901nd-v5|\
	tl-wdr3320-v2|\
	tl-wdr3500|\
	tl-wdr4300|\
	tl-wdr4900-v2|\
	tl-wdr6500-v2|\
	tl-wpa8630|\
	tl-wr1041n-v2|\
	tl-wr1043nd|\
	tl-wr1043nd-v2|\
	tl-wr1043nd-v4|\
	tl-wr2543n|\
	tl-wr703n|\
	tl-wr710n|\
	tl-wr720n-v3|\
	tl-wr740n-v5|\
	tl-wr740n-v6|\
	tl-wr741nd|\
	tl-wr742n-v5|\
	tl-wr741nd-v4|\
	tl-wr802n-v1|\
	tl-wr802n-v2|\
	tl-wr810n|\
	tl-wr810n-v2|\
	tl-wr840n-v2|\
	tl-wr840n-v3|\
	tl-wr841n-v1|\
	tl-wr841n-v7|\
	tl-wr841n-v8|\
	tl-wr841n-v9|\
	tl-wr841n-v11|\
	tl-wr842n-v2|\
	tl-wr842n-v3|\
	tl-wr882n-v1|\
	tl-wr902ac-v1|\
	tl-wr940n-v4|\
	tl-wr940n-v6|\
	tl-wr941nd|\
	tl-wr941nd-v5|\
	tl-wr941nd-v6|\
	ts-d084|\
	wifi-pineapple-nano)
		local magic_ver="0100"

		case "$board" in
		tl-wdr6500-v2)
			magic_ver="0200"
			;;
		esac

		[ "$magic" != "$magic_ver" ] && {
			echo "Invalid image type."
			return 1
		}

		local hwid
		local mid
		local imagehwid
		local imagemid

		hwid=$(tplink_get_hwid)
		mid=$(tplink_get_mid)
		imagehwid=$(tplink_get_image_hwid "$1")
		imagemid=$(tplink_get_image_mid "$1")

		[ "$hwid" != "$imagehwid" -o "$mid" != "$imagemid" ] && {
			echo "Invalid image, hardware ID mismatch, hw:$hwid $mid image:$imagehwid $imagemid."
			return 1
		}

		local boot_size

		boot_size=$(tplink_get_image_boot_size "$1")

		[ "$boot_size" != "00000000" ] && {
			echo "Invalid image, it contains a bootloader."
			return 1
		}

		return 0
		;;

	bsb|\
	dir-825-b1|\
	tew-673gru)
		dir825b_check_image "$1" && return 0
		;;

	# MikroTik NAND boards: delegate to the routerboard metadata check.
	rb-411|\
	rb-411u|\
	rb-433|\
	rb-433u|\
	rb-435g|\
	rb-450|\
	rb-450g|\
	rb-493|\
	rb-493g|\
	rb-750|\
	rb-750gl|\
	rb-751|\
	rb-751g|\
	rb-911g-2hpnd|\
	rb-911g-5hpnd|\
	rb-911g-5hpacd|\
	rb-912uag-2hpnd|\
	rb-912uag-5hpnd|\
	rb-921gs-5hpacd-r2|\
	rb-951g-2hnd|\
	rb-951ui-2hnd|\
	rb-2011l|\
	rb-2011il|\
	rb-2011uas|\
	rb-2011uas-2hnd|\
	rb-2011uias|\
	rb-2011uias-2hnd|\
	rb-sxt2n|\
	rb-sxt5n)
		nand_do_platform_check routerboard $1
		return $?
		;;

	# Generic NAND boards: metadata check keyed on the board name.
	c-60|\
	hiveap-121|\
	nbg6716|\
	r6100|\
	rambutan|\
	wi2a-ac200i|\
	wndr3700v4|\
	wndr4300)
		nand_do_platform_check $board $1
		return $?
		;;

	cpe210|\
	cpe510|\
	eap120|\
	wbs210|\
	wbs510)
		tplink_pharos_check_image "$1" && return 0
		return 1
		;;

	# OpenMesh images have their own dual-magic check.
	a40|\
	a60|\
	mr1750|\
	mr1750v2|\
	mr600|\
	mr600v2|\
	mr900|\
	mr900v2|\
	om2p|\
	om2p-hs|\
	om2p-hsv2|\
	om2p-hsv3|\
	om2p-hsv4|\
	om2p-lc|\
	om2pv2|\
	om2pv4|\
	om5p|\
	om5p-ac|\
	om5p-acv2|\
	om5p-an)
		platform_check_image_openmesh "$magic_long" "$1" && return 0
		return 1
		;;

	mr18|\
	z1)
		merakinand_do_platform_check $board $1
		return $?
		;;

	# Seama images: outer magic plus the "firm" type field.
	dir-869-a1|\
	mynet-n600|\
	mynet-n750|\
	qihoo-c301)
		[ "$magic_long" != "5ea3a417" ] && {
			echo "Invalid image, bad magic: $magic_long"
			return 1
		}

		local typemagic=$(seama_get_type_magic "$1")
		[ "$typemagic" != "6669726d" ] && {
			echo "Invalid image, bad type: $typemagic"
			return 1
		}

		return 0
		;;

	e2100l|\
	mynet-rext|\
	wrt160nl)
		cybertan_check_image "$1" && return 0
		return 1
		;;

	nbg6616|\
	uap-pro|\
	unifi-outdoor-plus)
		[ "$magic_long" != "19852003" ] && {
			echo "Invalid image type."
			return 1
		}

		return 0
		;;

	tube2h)
		alfa_check_image "$1" && return 0
		return 1
		;;

	# NETGEAR boards: image magic must match the installed firmware's magic.
	wndr3700|\
	wnr1000-v2|\
	wnr2000-v3|\
	wnr612-v2|\
	wpn824n)
		local hw_magic

		hw_magic="$(ar71xx_get_mtd_part_magic firmware)"
		[ "$magic_long" != "$hw_magic" ] && {
			echo "Invalid image, hardware ID mismatch, hw:$hw_magic image:$magic_long."
			return 1
		}

		return 0
		;;

	wnr2000-v4)
		[ "$magic_long" != "32303034" ] && {
			echo "Invalid image type."
			return 1
		}

		return 0
		;;

	wnr2200)
		[ "$magic_long" != "32323030" ] && {
			echo "Invalid image type."
			return 1
		}

		return 0
		;;

	dap-2695-a1)
		local magic=$(wrgg_get_image_magic "$1")
		[ "$magic" != "21030820" ] && {
			echo "Invalid image, bad type: $magic"
			return 1
		}

		return 0;
		;;

	# these boards use metadata images
	fritz300e|\
	rb-750-r2|\
	rb-750p-pbr2|\
	rb-750up-r2|\
	rb-911-2hn|\
	rb-911-5hn|\
	rb-941-2nd|\
	rb-951ui-2nd|\
	rb-952ui-5ac2nd|\
	rb-962uigs-5hact2hnt|\
	rb-lhg-5nd|\
	rb-map-2nd|\
	rb-mapl-2nd|\
	rb-wap-2nd|\
	rb-wapg-5hact2hnd)
		return 0
		;;

	esac

	echo "Sysupgrade is not yet supported on $board."
	return 1
}
# Board-specific preparation before the upgrade image is written.
platform_pre_upgrade() {
	local board=$(board_name)

	case "$board" in
	rb-750-r2|\
	rb-750p-pbr2|\
	rb-750up-r2|\
	rb-911-2hn|\
	rb-911-5hn|\
	rb-941-2nd|\
	rb-951ui-2nd|\
	rb-952ui-5ac2nd|\
	rb-962uigs-5hact2hnt|\
	rb-lhg-5nd|\
	rb-map-2nd|\
	rb-mapl-2nd|\
	rb-wap-2nd|\
	rb-wapg-5hact2hnd)
		# erase firmware if booted from initramfs
		[ -z "$(rootfs_type)" ] && mtd erase firmware
		;;
	esac
}
# NAND-specific preparation: write the kernel directly (MikroTik) or flip
# the active dual-boot bank (Nokia wi2a-ac200i) before the rootfs is flashed.
platform_nand_pre_upgrade() {
	local board=$(board_name)

	case "$board" in
	rb*)
		# MikroTik: the generic code must not touch the kernel partition;
		# extract the kernel from the sysupgrade tar and nandwrite it here.
		CI_KERNPART=none
		local fw_mtd=$(find_mtd_part kernel)
		fw_mtd="${fw_mtd/block/}"
		[ -n "$fw_mtd" ] || return
		mtd erase kernel
		tar xf "$1" sysupgrade-routerboard/kernel -O | nandwrite -o "$fw_mtd" -
		;;
	wi2a-ac200i)
		# Toggle the bootloader's dual-partition selection so the new
		# image is written to (and booted from) the inactive bank.
		case "$(fw_printenv -n dualPartition)" in
			imgA)
				fw_setenv dualPartition imgB
				fw_setenv ActImg NokiaImageB
			;;
			imgB)
				fw_setenv dualPartition imgA
				fw_setenv ActImg NokiaImageA
			;;
		esac
		# Detach the currently mounted ubi so the alternate bank is used.
		ubiblock -r /dev/ubiblock0_0 2>/dev/null >/dev/null
		rm -f /dev/ubiblock0_0
		ubidetach -d 0 2>/dev/null >/dev/null
		CI_UBIPART=ubi_alt
		CI_KERNPART=kernel_alt
		;;
	esac
}
# Perform the actual flash for the current board, dispatching to the
# family-specific upgrade routine (falls back to default_do_upgrade).
platform_do_upgrade() {
	local board=$(board_name)

	case "$board" in
	all0258n)
		platform_do_upgrade_allnet "0x9f050000" "$ARGV"
		;;
	all0305|\
	eap7660d|\
	ja76pf|\
	ja76pf2|\
	jwap003|\
	ls-sr71|\
	pb42|\
	pb44|\
	routerstation|\
	routerstation-pro)
		platform_do_upgrade_combined "$ARGV"
		;;
	all0315n)
		platform_do_upgrade_allnet "0x9f080000" "$ARGV"
		;;
	cap4200ag|\
	eap300v2|\
	ens202ext)
		platform_do_upgrade_allnet "0xbf0a0000" "$ARGV"
		;;
	dir-825-b1|\
	tew-673gru)
		platform_do_upgrade_dir825b "$ARGV"
		;;
	a40|\
	a60|\
	mr1750|\
	mr1750v2|\
	mr600|\
	mr600v2|\
	mr900|\
	mr900v2|\
	om2p|\
	om2p-hs|\
	om2p-hsv2|\
	om2p-hsv3|\
	om2p-hsv4|\
	om2p-lc|\
	om2pv2|\
	om2pv4|\
	om5p|\
	om5p-ac|\
	om5p-acv2|\
	om5p-an)
		platform_do_upgrade_openmesh "$ARGV"
		;;
	c-60|\
	hiveap-121|\
	nbg6716|\
	r6100|\
	rambutan|\
	rb-411|\
	rb-411u|\
	rb-433|\
	rb-433u|\
	rb-435g|\
	rb-450|\
	rb-450g|\
	rb-493|\
	rb-493g|\
	rb-750|\
	rb-750gl|\
	rb-751|\
	rb-751g|\
	rb-911g-2hpnd|\
	rb-911g-5hpacd|\
	rb-911g-5hpnd|\
	rb-912uag-2hpnd|\
	rb-912uag-5hpnd|\
	rb-921gs-5hpacd-r2|\
	rb-951g-2hnd|\
	rb-951ui-2hnd|\
	rb-2011il|\
	rb-2011l|\
	rb-2011uas|\
	rb-2011uas-2hnd|\
	rb-2011uias|\
	rb-2011uias-2hnd|\
	rb-sxt2n|\
	rb-sxt5n|\
	wi2a-ac200i|\
	wndr3700v4|\
	wndr4300)
		nand_do_upgrade "$1"
		;;
	mr18|\
	z1)
		merakinand_do_upgrade "$1"
		;;
	uap-pro|\
	unifi-outdoor-plus)
		# Config lives at a non-standard offset on these boards.
		MTD_CONFIG_ARGS="-s 0x180000"
		default_do_upgrade "$ARGV"
		;;
	wp543|\
	wpe72)
		platform_do_upgrade_compex "$ARGV"
		;;
	*)
		default_do_upgrade "$ARGV"
		;;
	esac
}
|
981213/openwrt
|
target/linux/ar71xx/base-files/lib/upgrade/platform.sh
|
Shell
|
gpl-2.0
| 15,728 |
#!/bin/bash
#set -xv
set -e

# Build liblst.a for the LNet selftest tools by collecting every member
# object of the component static libraries and re-archiving them into one.
# $AR and $RANLIB are provided by the build environment.
CWD=$(pwd)
LIBS=$1
LND_LIBS=$2
PTHREAD_LIBS=$3

# do cleanup at first
rm -f liblst.so

ALL_OBJS=

# Append every object contained in archive $1/$2 to ALL_OBJS,
# prefixing each member name with the archive's directory $1.
build_obj_list() {
	local member
	for member in $($AR -t $1/$2 | grep -v SYMDEF | grep -v SORTED); do
		ALL_OBJS="${ALL_OBJS}$1/${member} "
	done
}

# lnet components libs
build_obj_list ../../libcfs/libcfs libcfs.a
build_obj_list ../../libcfs/libcfs libcfsutil.a
if echo "$LND_LIBS" | grep "socklnd" >/dev/null; then
	build_obj_list ../../lnet/ulnds/socklnd libsocklnd.a
fi
build_obj_list ../../lnet/lnet liblnet.a
build_obj_list ../../lnet/selftest libselftest.a

# create static lib lustre
rm -f $CWD/liblst.a
$AR -cru $CWD/liblst.a $ALL_OBJS
$RANLIB $CWD/liblst.a
|
pratikrupala/try
|
lnet/utils/genlib.sh
|
Shell
|
gpl-2.0
| 696 |
#!/bin/bash
# Launch a disposable (--rm) interactive MongoDB container named "mongod"
# from the amiller/ubuntu-mongod:v1 image.
docker run --rm --name mongod -t -i amiller/ubuntu-mongod:v1
|
atm08e/keystone-docker
|
mongod/start.sh
|
Shell
|
gpl-2.0
| 74 |
#!/bin/sh
# Init script: mount the OpenDingux data partition under /media/data.
DATA_PARTITION=/dev/mmcblk0p2
DATA_PARTITION_FS=auto

# /boot/od.conf may override the partition device and filesystem type.
[ -r /boot/od.conf ] && . /boot/od.conf

# Act on "start" or when invoked with no argument at all.
case "$1" in
''|start)
	echo "Mounting data partition..."
	# /media is kept read-only; open it up just long enough to mount.
	/bin/mount -o remount,rw /media
	/bin/mount -t ${DATA_PARTITION_FS} ${DATA_PARTITION} /media/data
	/bin/mount -o remount,ro /media
	;;
esac
|
gcwnow/buildroot
|
board/opendingux/gcw0/target_skeleton/etc/init.d/S03mountmedia.sh
|
Shell
|
gpl-2.0
| 322 |
#!/usr/bin/env bash
# Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Completely uninstalls Pi-hole
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.

# Color/symbol table shared by all Pi-hole scripts (TICK, CROSS, QST, ...).
source "/opt/pihole/COL_TABLE"

# Confirm intent; anything other than an explicit "y" cancels.
while true; do
    read -rp "  ${QST} Are you sure you would like to remove ${COL_WHITE}Pi-hole${COL_NC}? [y/N] " yn
    case ${yn} in
        [Yy]* ) break;;
        [Nn]* ) echo -e "${OVER}  ${COL_LIGHT_GREEN}Uninstall has been canceled${COL_NC}"; exit 0;;
        * ) echo -e "${OVER}  ${COL_LIGHT_GREEN}Uninstall has been canceled${COL_NC}"; exit 0;;
    esac
done

# Must be root to uninstall
str="Root user check"
if [[ ${EUID} -eq 0 ]]; then
    echo -e "  ${TICK} ${str}"
else
    # Check if sudo is actually installed
    # If it isn't, exit because the uninstall can not complete
    if [ -x "$(command -v sudo)" ]; then
        export SUDO="sudo"
    else
        echo -e "  ${CROSS} ${str}
            Script called with non-root privileges
            The Pi-hole requires elevated privileges to uninstall"
        exit 1
    fi
fi
readonly PI_HOLE_FILES_DIR="/etc/.pihole"
# PH_TEST prevents basic-install.sh from running its main routine on source.
PH_TEST="true"
source "${PI_HOLE_FILES_DIR}/automated install/basic-install.sh"
# setupVars set in basic-install.sh
source "${setupVars}"

# package_manager_detect() sourced from basic-install.sh
package_manager_detect

# Install packages used by the Pi-hole
DEPS=("${INSTALLER_DEPS[@]}" "${PIHOLE_DEPS[@]}")
if [[ "${INSTALL_WEB_SERVER}" == true ]]; then
    # Install the Web dependencies
    DEPS+=("${PIHOLE_WEB_DEPS[@]}")
fi

# Compatibility
# Define the distro-specific removal command and an "is it installed?" probe.
if [ -x "$(command -v apt-get)" ]; then
    # Debian Family
    PKG_REMOVE=("${PKG_MANAGER}" -y remove --purge)
    package_check() {
        dpkg-query -W -f='${Status}' "$1" 2>/dev/null | grep -c "ok installed"
    }
elif [ -x "$(command -v rpm)" ]; then
    # Fedora Family
    PKG_REMOVE=("${PKG_MANAGER}" remove -y)
    package_check() {
        rpm -qa | grep "^$1-" > /dev/null
    }
else
    echo -e "  ${CROSS} OS distribution not supported"
    exit 1
fi
# Interactively remove each dependency package Pi-hole may have installed,
# then remove dnsmasq configs and fall through to removeNoPurge.
removeAndPurge() {
    # Purge dependencies
    echo ""
    for i in "${DEPS[@]}"; do
        if package_check "${i}" > /dev/null; then
            # Ask per package; anything but y/n re-prompts.
            while true; do
                read -rp "  ${QST} Do you wish to remove ${COL_WHITE}${i}${COL_NC} from your system? [Y/N] " yn
                case ${yn} in
                    [Yy]* )
                        echo -ne "  ${INFO} Removing ${i}...";
                        ${SUDO} "${PKG_REMOVE[@]}" "${i}" &> /dev/null;
                        echo -e "${OVER}  ${INFO} Removed ${i}";
                        break;;
                    [Nn]* ) echo -e "  ${INFO} Skipped ${i}"; break;;
                esac
            done
        else
            echo -e "  ${INFO} Package ${i} not installed"
        fi
    done

    # Remove dnsmasq config files
    ${SUDO} rm -f /etc/dnsmasq.conf /etc/dnsmasq.conf.orig /etc/dnsmasq.d/*-pihole*.conf &> /dev/null
    echo -e "  ${TICK} Removing dnsmasq config files"

    # Call removeNoPurge to remove Pi-hole specific files
    removeNoPurge
}
# Remove everything Pi-hole itself installed (web UI, cron entries,
# configs, FTL daemon, man pages, user/group) without touching any
# distro packages.
removeNoPurge() {
    # Only web directories/files that are created by Pi-hole should be removed
    echo -ne "  ${INFO} Removing Web Interface..."
    ${SUDO} rm -rf /var/www/html/admin &> /dev/null
    ${SUDO} rm -rf /var/www/html/pihole &> /dev/null
    ${SUDO} rm -f /var/www/html/index.lighttpd.orig &> /dev/null

    # If the web directory is empty after removing these files, then the parent html directory can be removed.
    if [ -d "/var/www/html" ]; then
        if [[ ! "$(ls -A /var/www/html)" ]]; then
            ${SUDO} rm -rf /var/www/html &> /dev/null
        fi
    fi
    echo -e "${OVER}  ${TICK} Removed Web Interface"

    # Attempt to preserve backwards compatibility with older versions
    # to guarantee no additional changes were made to /etc/crontab after
    # the installation of pihole, /etc/crontab.pihole should be permanently
    # preserved.
    if [[ -f /etc/crontab.orig ]]; then
        ${SUDO} mv /etc/crontab /etc/crontab.pihole
        ${SUDO} mv /etc/crontab.orig /etc/crontab
        ${SUDO} service cron restart
        echo -e "  ${TICK} Restored the default system cron"
    fi

    # Attempt to preserve backwards compatibility with older versions
    if [[ -f /etc/cron.d/pihole ]];then
        ${SUDO} rm -f /etc/cron.d/pihole &> /dev/null
        echo -e "  ${TICK} Removed /etc/cron.d/pihole"
    fi

    # Restore lighttpd's original configuration if we saved one.
    if package_check lighttpd > /dev/null; then
        if [[ -f /etc/lighttpd/lighttpd.conf.orig ]]; then
            ${SUDO} mv /etc/lighttpd/lighttpd.conf.orig /etc/lighttpd/lighttpd.conf
        fi

        if [[ -f /etc/lighttpd/external.conf ]]; then
            ${SUDO} rm /etc/lighttpd/external.conf
        fi
        echo -e "  ${TICK} Removed lighttpd configs"
    fi

    # Drop Pi-hole's dnsmasq snippets, logs, source trees and helpers.
    ${SUDO} rm -f /etc/dnsmasq.d/adList.conf &> /dev/null
    ${SUDO} rm -f /etc/dnsmasq.d/01-pihole.conf &> /dev/null
    ${SUDO} rm -f /etc/dnsmasq.d/06-rfc6761.conf &> /dev/null
    ${SUDO} rm -rf /var/log/*pihole* &> /dev/null
    ${SUDO} rm -rf /etc/pihole/ &> /dev/null
    ${SUDO} rm -rf /etc/.pihole/ &> /dev/null
    ${SUDO} rm -rf /opt/pihole/ &> /dev/null
    ${SUDO} rm -f /usr/local/bin/pihole &> /dev/null
    ${SUDO} rm -f /etc/bash_completion.d/pihole &> /dev/null
    ${SUDO} rm -f /etc/sudoers.d/pihole &> /dev/null
    echo -e "  ${TICK} Removed config files"

    # Restore Resolved
    if [[ -e /etc/systemd/resolved.conf.orig ]]; then
        ${SUDO} cp -p /etc/systemd/resolved.conf.orig /etc/systemd/resolved.conf
        systemctl reload-or-restart systemd-resolved
    fi

    # Remove FTL
    if command -v pihole-FTL &> /dev/null; then
        echo -ne "  ${INFO} Removing pihole-FTL..."
        if [[ -x "$(command -v systemctl)" ]]; then
            systemctl stop pihole-FTL
        else
            service pihole-FTL stop
        fi
        ${SUDO} rm -f /etc/init.d/pihole-FTL
        ${SUDO} rm -f /usr/bin/pihole-FTL
        echo -e "${OVER}  ${TICK} Removed pihole-FTL"
    fi

    # If the pihole manpage exists, then delete and rebuild man-db
    if [[ -f /usr/local/share/man/man8/pihole.8 ]]; then
        ${SUDO} rm -f /usr/local/share/man/man8/pihole.8 /usr/local/share/man/man8/pihole-FTL.8 /usr/local/share/man/man5/pihole-FTL.conf.5
        ${SUDO} mandb -q &>/dev/null
        echo -e "  ${TICK} Removed pihole man page"
    fi

    # If the pihole user exists, then remove
    if id "pihole" &> /dev/null; then
        if ${SUDO} userdel -r pihole 2> /dev/null; then
            echo -e "  ${TICK} Removed 'pihole' user"
        else
            echo -e "  ${CROSS} Unable to remove 'pihole' user"
        fi
    fi
    # If the pihole group exists, then remove
    if getent group "pihole" &> /dev/null; then
        if ${SUDO} groupdel pihole 2> /dev/null; then
            echo -e "  ${TICK} Removed 'pihole' group"
        else
            echo -e "  ${CROSS} Unable to remove 'pihole' group"
        fi
    fi

    echo -e "\\n   We're sorry to see you go, but thanks for checking out Pi-hole!
       If you need help, reach out to us on GitHub, Discourse, Reddit or Twitter
       Reinstall at any time: ${COL_WHITE}curl -sSL https://install.pi-hole.net | bash${COL_NC}

      ${COL_LIGHT_RED}Please reset the DNS on your router/clients to restore internet connectivity
      ${COL_LIGHT_GREEN}Uninstallation Complete! ${COL_NC}"
}
######### SCRIPT ###########
# Entry point: list candidate dependency packages, then either walk through
# them one by one (removeAndPurge) or leave them installed (removeNoPurge).
echo -e "  ${INFO} Be sure to confirm if any dependencies should not be removed"
while true; do
    echo -e "  ${INFO} ${COL_YELLOW}The following dependencies may have been added by the Pi-hole install:"
    echo -n "    "
    for i in "${DEPS[@]}"; do
        echo -n "${i} "
    done
    echo "${COL_NC}"
    read -rp "  ${QST} Do you wish to go through each dependency for removal? (Choosing No will leave all dependencies installed) [Y/n] " yn
    case ${yn} in
        # Default (any unrecognized answer) is the thorough purge path.
        [Yy]* ) removeAndPurge; break;;
        [Nn]* ) removeNoPurge; break;;
        * ) removeAndPurge; break;;
    esac
done
|
jacobsalmela/pi-hole
|
automated install/uninstall.sh
|
Shell
|
gpl-2.0
| 8,144 |
#!/bin/bash
#
# this should be run from the src directory which contains the subsurface
# directory; the layout should look like this:
# .../src/subsurface
#
# the script will build Subsurface and libdivecomputer (plus some other
# dependencies if requestsed) from source.
#
# it installs the libraries and subsurface in the install-root subdirectory
# of the current directory (except on Mac where the Subsurface.app ends up
# in subsurface/build)
#
# by default it puts the build folders in
# ./subsurface/libdivecomputer/build (libdivecomputer build)
# ./subsurface/build (desktop build)
# ./subsurface/build-mobile (mobile build)
# ./subsurface/build-downloader (headless downloader build)
#
# there is basic support for building from a shared directory, e.g., with
# one subsurface source tree on a host computer, accessed from multiple
# VMs as well as the host to build without stepping on each other - the
# one exception is running autotools for libdiveconputer which has to
# happen in the shared libdivecomputer folder
# one way to achieve this is to have ./subsurface be a symlink; in that
# case the build directories are libdivecomputer/build, build, build-mobile
# alternatively a build prefix can be explicitly given with -build-prefix
# that build prefix is directly pre-pended to the destinations mentioned
# above - if this is a directory, it needs to end with '/'

# don't keep going if we run into an error
set -e

# create a log file of the build
# (duplicates stdout+stderr through tee into build.log)
exec 1> >(tee build.log) 2>&1

SRC=$(pwd)

if [[ -L subsurface && -d subsurface ]] ; then
	# ./subsurface is a symbolic link to the source directory, so let's
	# set up a prefix that puts the build directories in the current directory
	# but this can be overwritten via the command line
	BUILD_PREFIX="$SRC/"
fi

# Build defaults; several are overridden by the command-line options below.
PLATFORM=$(uname)
BTSUPPORT="ON"
DEBUGRELEASE="Debug"
SRC_DIR="subsurface"
while [[ $# -gt 0 ]] ; do
arg="$1"
case $arg in
-no-bt)
# force Bluetooth support off
BTSUPPORT="OFF"
;;
-quick)
# only build libdivecomputer and Subsurface - this assumes that all other dependencies don't need rebuilding
QUICK="1"
;;
-src-dir)
# instead of using "subsurface" as source directory, use src/<srcdir>
# this is convinient when using "git worktree" to have multiple branches
# checked out on the computer.
# remark <srcdir> must be in src (in parallel to subsurface), in order to
# use the same 3rd party set.
shift
SRC_DIR="$1"
;;
-build-deps)
# in order to build the dependencies on Mac for release builds (to deal with the macosx-version-min for those)
# call this script with -build-deps
BUILD_DEPS="1"
;;
-build-prefix)
# instead of building in build & build-mobile in the current directory, build in <buildprefix>build
# and <buildprefix>build-mobile; notice that there's no slash between the prefix and the two directory
# names, so if the prefix is supposed to be a path, add the slash at the end of it, or do funky things
# where build/build-mobile get appended to partial path name
shift
BUILD_PREFIX="$1"
;;
-build-with-webkit)
# unless you build Qt from source (or at least webkit from source, you won't have webkit installed
# -build-with-webkit tells the script that in fact we can assume that webkit is present (it usually
# is still available on Linux distros)
BUILD_WITH_WEBKIT="1"
;;
-mobile)
# we are building Subsurface-mobile
# Note that this will run natively on the host OS.
# To cross build for Android or iOS (including simulator)
# use the scripts in packaging/xxx
BUILD_MOBILE="1"
;;
-desktop)
# we are building Subsurface
BUILD_DESKTOP="1"
;;
-downloader)
# we are building Subsurface-downloader
BUILD_DOWNLOADER="1"
;;
-both)
# we are building Subsurface and Subsurface-mobile
BUILD_MOBILE="1"
BUILD_DESKTOP="1"
;;
-all)
# we are building Subsurface, Subsurface-mobile, and Subsurface-downloader
BUILD_MOBILE="1"
BUILD_DESKTOP="1"
BUILD_DOWNLOADER="1"
;;
-create-appdir)
# we are building an AppImage as by product
CREATE_APPDIR="1"
;;
-release)
# don't build Debug binaries
DEBUGRELEASE="Release"
;;
*)
echo "Unknown command line argument $arg"
echo "Usage: build.sh [-no-bt] [-quick] [-build-deps] [-src-dir <SUBSURFACE directory>] [-build-prefix <PREFIX>] [-build-with-webkit] [-mobile] [-desktop] [-downloader] [-both] [-all] [-create-appdir] [-release]"
exit 1
;;
esac
shift
done
# recreate the old default behavior - no flag set implies build desktop
if [ "$BUILD_MOBILE$BUILD_DOWNLOADER" = "" ] ; then
BUILD_DESKTOP="1"
fi
if [ "$BUILD_DEPS" = "1" ] && [ "$QUICK" = "1" ] ; then
echo "Conflicting options; cannot request combine -build-deps and -quick"
exit 1;
fi
# Verify that the Xcode Command Line Tools are installed
if [ "$PLATFORM" = Darwin ] ; then
if [ -d /Developer/SDKs ] ; then
SDKROOT=/Developer/SDKs
elif [ -d /Library/Developer/CommandLineTools/SDKs ] ; then
SDKROOT=/Library/Developer/CommandLineTools/SDKs
elif [ -d /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs ] ; then
SDKROOT=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs
else
echo "Cannot find SDK sysroot (usually /Developer/SDKs or"
echo "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs)"
exit 1;
fi
# find a 10.x base SDK to use, or if none can be found, find a numbered 11.x base SDK to use
BASESDK=$(ls $SDKROOT | grep "MacOSX10\.1.\.sdk" | head -1 | sed -e "s/MacOSX//;s/\.sdk//")
if [ -z "$BASESDK" ] ; then
BASESDK=$(ls $SDKROOT | grep -E "MacOSX11\.[0-9]+\.sdk" | head -1 | sed -e "s/MacOSX//;s/\.sdk//")
if [ -z "$BASESDK" ] ; then
echo "Cannot find a base SDK of type 10.x or 11.x under the SDK root of ${SDKROOT}"
exit 1;
fi
fi
echo "Using ${BASESDK} as the BASESDK under ${SDKROOT}"
OLDER_MAC="-mmacosx-version-min=${BASESDK} -isysroot${SDKROOT}/MacOSX${BASESDK}.sdk"
OLDER_MAC_CMAKE="-DCMAKE_OSX_DEPLOYMENT_TARGET=${BASESDK} -DCMAKE_OSX_SYSROOT=${SDKROOT}/MacOSX${BASESDK}.sdk/"
if [[ ! -d /usr/include && ! -d "${SDKROOT}/MacOSX${BASESDK}.sdk/usr/include" ]] ; then
echo "Error: Xcode Command Line Tools are not installed"
echo ""
echo "Please run:"
echo " xcode-select --install"
echo "to install them (you'll have to agree to Apple's licensing terms etc), then run build.sh again"
exit 1;
fi
fi
# normally this script builds the desktop version in subsurface/build
# the user can explicitly pick the builds requested
# for historic reasons, -both builds mobile and desktop, -all builds the downloader as well
if [ "$BUILD_MOBILE" = "1" ] ; then
echo "building Subsurface-mobile in ${SRC_DIR}/build-mobile"
BUILDS+=( "MobileExecutable" )
BUILDDIRS+=( "${BUILD_PREFIX}build-mobile" )
fi
if [ "$BUILD_DOWNLOADER" = "1" ] ; then
echo "building Subsurface-downloader in ${SRC_DIR}/build-downloader"
BUILDS+=( "DownloaderExecutable" )
BUILDDIRS+=( "${BUILD_PREFIX}build-downloader" )
fi
# if no build target was requested at all, default to the desktop version.
# NB: the old test '[ "$BUILDS" = "" ]' only looked at element 0 of the
# BUILDS array; use the array length for an explicit emptiness check.
if [ "$BUILD_DESKTOP" = "1" ] || [ "${#BUILDS[@]}" -eq 0 ] ; then
	echo "building Subsurface in ${SRC_DIR}/build"
	BUILDS+=( "DesktopExecutable" )
	BUILDDIRS+=( "${BUILD_PREFIX}build" )
fi
if [[ ! -d "${SRC_DIR}" ]] ; then
echo "please start this script from the directory containing the Subsurface source directory"
exit 1
fi
if [ -z "$BUILD_PREFIX" ] ; then
INSTALL_ROOT=$SRC/install-root
else
INSTALL_ROOT="$BUILD_PREFIX"install-root
fi
mkdir -p "$INSTALL_ROOT"
export INSTALL_ROOT
# make sure we find our own packages first (e.g., libgit2 only uses pkg_config to find libssh2)
export PKG_CONFIG_PATH=$INSTALL_ROOT/lib/pkgconfig:$PKG_CONFIG_PATH
echo Building from "$SRC", installing in "$INSTALL_ROOT"
# find qmake: prefer the Qt install referenced by CMAKE_PREFIX_PATH,
# otherwise probe the PATH for qmake / qmake-qt5 (the latter is the
# name used by several Linux distributions).
if [ -n "$CMAKE_PREFIX_PATH" ] ; then
	QMAKE=$CMAKE_PREFIX_PATH/../../bin/qmake
else
	hash qmake > /dev/null 2> /dev/null && QMAKE=qmake
	# quote $QMAKE: the old unquoted '[ -z $QMAKE ]' only worked by accident
	# of test(1)'s one-argument semantics when QMAKE was unset
	[ -z "$QMAKE" ] && hash qmake-qt5 > /dev/null 2> /dev/null && QMAKE=qmake-qt5
	[ -z "$QMAKE" ] && echo "cannot find qmake or qmake-qt5" >&2 && exit 1
fi
# it's not entirely clear why we only set this on macOS, but this appears to be what works
if [ "$PLATFORM" = Darwin ] ; then
if [ -z "$CMAKE_PREFIX_PATH" ] ; then
# we already found qmake and can get the right path information from that
libdir=$($QMAKE -query QT_INSTALL_LIBS)
if [ $? -eq 0 ]; then
export CMAKE_PREFIX_PATH=$libdir/cmake
else
echo "something is broken with the Qt install"
exit 1
fi
fi
fi
# on Debian and Ubuntu based systems, the private QtLocation and
# QtPositioning headers aren't bundled. Download them if necessary.
if [ "$PLATFORM" = Linux ] ; then
QT_HEADERS_PATH=$($QMAKE -query QT_INSTALL_HEADERS)
QT_VERSION=$($QMAKE -query QT_VERSION)
if [ ! -d "$QT_HEADERS_PATH/QtLocation/$QT_VERSION/QtLocation/private" ] &&
[ ! -d "$INSTALL_ROOT"/include/QtLocation/private ] ; then
echo "Missing private Qt headers for $QT_VERSION; downloading them..."
QTLOC_GIT=./qtlocation_git
QTLOC_PRIVATE=$INSTALL_ROOT/include/QtLocation/private
QTPOS_PRIVATE=$INSTALL_ROOT/include/QtPositioning/private
rm -rf $QTLOC_GIT > /dev/null 2>&1
rm -rf "$INSTALL_ROOT"/include/QtLocation > /dev/null 2>&1
rm -rf "$INSTALL_ROOT"/include/QtPositioning > /dev/null 2>&1
git clone --branch "v$QT_VERSION" git://code.qt.io/qt/qtlocation.git --depth=1 $QTLOC_GIT
mkdir -p "$QTLOC_PRIVATE"
cd $QTLOC_GIT/src/location
find . -name '*_p.h' -print0 | xargs -0 cp -t "$QTLOC_PRIVATE"
cd "$SRC"
mkdir -p "$QTPOS_PRIVATE"
cd $QTLOC_GIT/src/positioning
find . -name '*_p.h' -print0 | xargs -0 cp -t "$QTPOS_PRIVATE"
cd "$SRC"
echo "* cleanup..."
rm -rf $QTLOC_GIT > /dev/null 2>&1
fi
fi
# set up the right file name extensions
if [ "$PLATFORM" = Darwin ] ; then
SH_LIB_EXT=dylib
if [ ! "$BUILD_DEPS" == "1" ] ; then
pkg-config --exists libgit2 && LIBGIT=$(pkg-config --modversion libgit2) && LIBGITMAJ=$(echo $LIBGIT | cut -d. -f1) && LIBGIT=$(echo $LIBGIT | cut -d. -f2)
if [[ "$LIBGITMAJ" -gt "0" || "$LIBGIT" -gt "25" ]] ; then
LIBGIT2_FROM_PKGCONFIG="-DLIBGIT2_FROM_PKGCONFIG=ON"
fi
fi
else
SH_LIB_EXT=so
LIBGIT_ARGS=" -DLIBGIT2_DYNAMIC=ON "
# check if we need to build libgit2 (and do so if necessary)
# first check pkgconfig (that will capture our own local build if
# this script has been run before)
if pkg-config --exists libgit2 ; then
LIBGIT=$(pkg-config --modversion libgit2)
LIBGITMAJ=$(echo $LIBGIT | cut -d. -f1)
LIBGIT=$(echo $LIBGIT | cut -d. -f2)
if [[ "$LIBGITMAJ" -gt "0" || "$LIBGIT" -gt "25" ]] ; then
LIBGIT2_FROM_PKGCONFIG="-DLIBGIT2_FROM_PKGCONFIG=ON"
fi
fi
if [[ "$LIBGITMAJ" -lt "1" && "$LIBGIT" -lt "26" ]] ; then
# maybe there's a system version that's new enough?
# Ugh that's uggly - read the ultimate filename, split at the last 'o' which gets us ".0.26.3" or ".1.0.0"
# since that starts with a dot, the field numbers in the cut need to be one higher
LDCONFIG=$(PATH=/sbin:/usr/sbin:$PATH which ldconfig)
if [ ! -z "$LDCONFIG" ] ; then
LIBGIT=$(realpath $("$LDCONFIG" -p | grep libgit2\\.so\\. | cut -d\ -f4) | awk -Fo '{ print $NF }')
LIBGITMAJ=$(echo $LIBGIT | cut -d. -f2)
LIBGIT=$(echo $LIBGIT | cut -d. -f3)
fi
fi
fi
if [[ $PLATFORM = Darwin && "$BUILD_DEPS" == "1" ]] ; then
# when building distributable binaries on a Mac, we cannot rely on anything from Homebrew,
# because that always requires the latest OS (how stupid is that - and they consider it a
# feature). So we painfully need to build the dependencies ourselves.
cd "$SRC"
./${SRC_DIR}/scripts/get-dep-lib.sh single . libz
pushd libz
# no, don't install pkgconfig files in .../libs/share/pkgconf - that's just weird
sed -i .bak 's/share\/pkgconfig/pkgconfig/' CMakeLists.txt
mkdir -p build
cd build
cmake "$OLDER_MAC_CMAKE" -DCMAKE_BUILD_TYPE="$DEBUGRELEASE" \
-DCMAKE_INSTALL_PREFIX="$INSTALL_ROOT" \
..
make -j4
make install
popd
./${SRC_DIR}/scripts/get-dep-lib.sh single . libcurl
pushd libcurl
bash ./buildconf
mkdir -p build
cd build
CFLAGS="$OLDER_MAC" ../configure --prefix="$INSTALL_ROOT" --with-darwinssl \
--disable-tftp --disable-ftp --disable-ldap --disable-ldaps --disable-imap --disable-pop3 --disable-smtp --disable-gopher --disable-smb --disable-rtsp
make -j4
make install
popd
./${SRC_DIR}/scripts/get-dep-lib.sh single . openssl
pushd openssl
mkdir -p build
cd build
../Configure --prefix="$INSTALL_ROOT" --openssldir="$INSTALL_ROOT" "$OLDER_MAC" darwin64-x86_64-cc
make depend
# all the tests fail because the assume that openssl is already installed. Odd? Still thinks work
make -j4 -k
make -k install
popd
./${SRC_DIR}/scripts/get-dep-lib.sh single . libssh2
pushd libssh2
mkdir -p build
cd build
cmake "$OLDER_MAC_CMAKE" -DCMAKE_INSTALL_PREFIX="$INSTALL_ROOT" -DCMAKE_BUILD_TYPE=$DEBUGRELEASE -DBUILD_SHARED_LIBS=ON -DBUILD_TESTING=OFF -DBUILD_EXAMPLES=OFF ..
make -j4
make install
popd
if [ "$PLATFORM" = Darwin ] ; then
# in order for macdeployqt to do its job correctly, we need the full path in the dylib ID
cd "$INSTALL_ROOT"/lib
NAME=$(otool -L libssh2.dylib | grep -v : | head -1 | cut -f1 -d\ | tr -d '\t')
echo "$NAME" | if grep -v / > /dev/null 2>&1 ; then
install_name_tool -id "$INSTALL_ROOT/lib/$NAME" "$INSTALL_ROOT/lib/$NAME"
fi
fi
fi
if [[ "$LIBGITMAJ" -lt "1" && "$LIBGIT" -lt "26" ]] ; then
LIBGIT_ARGS=" -DLIBGIT2_INCLUDE_DIR=$INSTALL_ROOT/include -DLIBGIT2_LIBRARIES=$INSTALL_ROOT/lib/libgit2.$SH_LIB_EXT "
cd "$SRC"
./${SRC_DIR}/scripts/get-dep-lib.sh single . libgit2
pushd libgit2
mkdir -p build
cd build
cmake "$OLDER_MAC_CMAKE" -DCMAKE_INSTALL_PREFIX="$INSTALL_ROOT" -DCMAKE_BUILD_TYPE="$DEBUGRELEASE" -DBUILD_CLAR=OFF ..
make -j4
make install
popd
if [ "$PLATFORM" = Darwin ] ; then
# in order for macdeployqt to do its job correctly, we need the full path in the dylib ID
cd "$INSTALL_ROOT/lib"
NAME=$(otool -L libgit2.dylib | grep -v : | head -1 | cut -f1 -d\ | tr -d '\t')
echo "$NAME" | if grep -v / > /dev/null 2>&1 ; then
install_name_tool -id "$INSTALL_ROOT/lib/$NAME" "$INSTALL_ROOT/lib/$NAME"
fi
fi
fi
if [[ $PLATFORM = Darwin && "$BUILD_DEPS" == "1" ]] ; then
# when building distributable binaries on a Mac, we cannot rely on anything from Homebrew,
# because that always requires the latest OS (how stupid is that - and they consider it a
# feature). So we painfully need to build the dependencies ourselves.
cd "$SRC"
./${SRC_DIR}/scripts/get-dep-lib.sh single . libzip
pushd libzip
mkdir -p build
cd build
cmake "$OLDER_MAC_CMAKE" -DCMAKE_BUILD_TYPE="$DEBUGRELEASE" \
-DCMAKE_INSTALL_PREFIX="$INSTALL_ROOT" \
..
make -j4
make install
popd
./${SRC_DIR}/scripts/get-dep-lib.sh single . hidapi
pushd hidapi
# there is no good tag, so just build master
bash ./bootstrap
mkdir -p build
cd build
CFLAGS="$OLDER_MAC" ../configure --prefix="$INSTALL_ROOT"
make -j4
make install
popd
./${SRC_DIR}/scripts/get-dep-lib.sh single . libusb
pushd libusb
bash ./bootstrap.sh
mkdir -p build
cd build
CFLAGS="$OLDER_MAC" ../configure --prefix="$INSTALL_ROOT" --disable-examples
make -j4
make install
popd
./${SRC_DIR}/scripts/get-dep-lib.sh single . libmtp
pushd libmtp
echo 'N' | NOCONFIGURE="1" bash ./autogen.sh
mkdir -p build
cd build
CFLAGS="$OLDER_MAC" ../configure --prefix="$INSTALL_ROOT"
make -j4
make install
popd
./${SRC_DIR}/scripts/get-dep-lib.sh single . libftdi1
pushd libftdi1
mkdir -p build
cd build
cmake "$OLDER_MAC_CMAKE" -DCMAKE_BUILD_TYPE="$DEBUGRELEASE" \
-DCMAKE_INSTALL_PREFIX="$INSTALL_ROOT" \
..
make -j4
make install
popd
fi
cd "$SRC"
# build libdivecomputer
cd ${SRC_DIR}
if [ ! -d libdivecomputer/src ] ; then
git submodule init
git submodule update --recursive
fi
mkdir -p "${BUILD_PREFIX}libdivecomputer/build"
cd "${BUILD_PREFIX}libdivecomputer/build"
if [ ! -f "$SRC"/${SRC_DIR}/libdivecomputer/configure ] ; then
# this is not a typo
# in some scenarios it appears that autoreconf doesn't copy the
# ltmain.sh file; running it twice, however, fixes that problem
autoreconf --install "$SRC"/${SRC_DIR}/libdivecomputer
autoreconf --install "$SRC"/${SRC_DIR}/libdivecomputer
fi
CFLAGS="$OLDER_MAC -I$INSTALL_ROOT/include $LIBDC_CFLAGS" "$SRC"/${SRC_DIR}/libdivecomputer/configure --prefix="$INSTALL_ROOT" --disable-examples
if [ "$PLATFORM" = Darwin ] ; then
# remove some copmpiler options that aren't supported on Mac
# otherwise the log gets very noisy
for i in $(find . -name Makefile)
do
sed -i .bak 's/-Wrestrict//;s/-Wno-unused-but-set-variable//' "$i"
done
# it seems that on my Mac some of the configure tests for libdivecomputer
# pass even though the feature tested for is actually missing
# let's hack around that
# touch config.status, recreate config.h and then disable HAVE_CLOCK_GETTIME
# this seems to work so that the Makefile doesn't re-run the
# configure process and overwrite all the changes we just made
touch config.status
make config.h
grep CLOCK_GETTIME config.h
sed -i .bak 's/^#define HAVE_CLOCK_GETTIME 1/#undef HAVE_CLOCK_GETTIME /' config.h
fi
make -j4
make install
# make sure we know where the libdivecomputer.a was installed - sometimes it ends up in lib64, sometimes in lib
STATIC_LIBDC="$INSTALL_ROOT/$(grep ^libdir Makefile | cut -d/ -f2)/libdivecomputer.a"
cd "$SRC"
if [ "$QUICK" != "1" ] && [ "$BUILD_DESKTOP$BUILD_MOBILE" != "" ] ; then
# build the googlemaps map plugin
cd "$SRC"
./${SRC_DIR}/scripts/get-dep-lib.sh single . googlemaps
pushd googlemaps
mkdir -p build
mkdir -p J10build
cd build
$QMAKE "INCLUDEPATH=$INSTALL_ROOT/include" ../googlemaps.pro
# on Travis the compiler doesn't support c++1z, yet qmake adds that flag;
# since things compile fine with c++11, let's just hack that away
# similarly, don't use -Wdata-time
if [ "$TRAVIS" = "true" ] ; then
mv Makefile Makefile.bak
cat Makefile.bak | sed -e 's/std=c++1z/std=c++11/g ; s/-Wdate-time//' > Makefile
fi
make -j4
make install
popd
fi
# finally, build Subsurface
set -x
for (( i=0 ; i < ${#BUILDS[@]} ; i++ )) ; do
SUBSURFACE_EXECUTABLE=${BUILDS[$i]}
BUILDDIR=${BUILDDIRS[$i]}
echo "build $SUBSURFACE_EXECUTABLE in $BUILDDIR"
if [ "$SUBSURFACE_EXECUTABLE" = "DesktopExecutable" ] && [ "$BUILD_WITH_WEBKIT" = "1" ]; then
EXTRA_OPTS="-DNO_USERMANUAL=OFF -DNO_PRINTING=OFF"
else
EXTRA_OPTS="-DNO_USERMANUAL=ON -DNO_PRINTING=ON"
fi
cd "$SRC"/${SRC_DIR}
# pull the plasma-mobile components from upstream if building Subsurface-mobile
if [ "$SUBSURFACE_EXECUTABLE" = "MobileExecutable" ] ; then
bash ./scripts/mobilecomponents.sh
EXTRA_OPTS="$EXTRA_OPTS -DECM_DIR=$SRC/$SRC_DIR/mobile-widgets/3rdparty/ECM"
fi
mkdir -p "$BUILDDIR"
cd "$BUILDDIR"
export CMAKE_PREFIX_PATH="$INSTALL_ROOT/lib/cmake;${CMAKE_PREFIX_PATH}"
cmake -DCMAKE_BUILD_TYPE="$DEBUGRELEASE" \
-DSUBSURFACE_TARGET_EXECUTABLE="$SUBSURFACE_EXECUTABLE" \
"$LIBGIT_ARGS" \
-DLIBDIVECOMPUTER_INCLUDE_DIR="$INSTALL_ROOT"/include \
-DLIBDIVECOMPUTER_LIBRARIES="$STATIC_LIBDC" \
-DCMAKE_PREFIX_PATH="$CMAKE_PREFIX_PATH" \
-DBTSUPPORT="$BTSUPPORT" \
-DCMAKE_INSTALL_PREFIX="$INSTALL_ROOT" \
$LIBGIT2_FROM_PKGCONFIG \
-DFORCE_LIBSSH=OFF \
$EXTRA_OPTS \
"$SRC"/${SRC_DIR}
if [ "$PLATFORM" = Darwin ] ; then
rm -rf Subsurface.app
rm -rf Subsurface-mobile.app
fi
LIBRARY_PATH=$INSTALL_ROOT/lib make -j4
LIBRARY_PATH=$INSTALL_ROOT/lib make install
if [ "$CREATE_APPDIR" = "1" ] ; then
# if we create an AppImage this makes gives us a sane starting point
cd "$SRC"
mkdir -p ./appdir
mkdir -p appdir/usr/share/metainfo
mkdir -p appdir/usr/share/icons/hicolor/256x256/apps
cp -r ./install-root/* ./appdir/usr
cp ${SRC_DIR}/appdata/subsurface.appdata.xml appdir/usr/share/metainfo/
cp ${SRC_DIR}/icons/subsurface-icon.png appdir/usr/share/icons/hicolor/256x256/apps/
fi
done
|
janmulder/subsurface
|
scripts/build.sh
|
Shell
|
gpl-2.0
| 20,037 |
echo
echo "setting up postfix for maia spam/virus filtering"
echo
# keep PID-stamped backups of the originals so the change can be reverted
cp /etc/postfix/main.cf /etc/postfix/main.cf-save-$$
cp /etc/postfix/master.cf /etc/postfix/master.cf-save-$$
cat master.cf-append >> /etc/postfix/master.cf
postconf -e inet_interfaces=all
postconf -e content_filter=maia:[127.0.0.1]:10024
# pull the host/domain settings out of the installer template
#hostname=`grep HOST installer.tmpl | awk -F\= '{ print $2 }'`
hostname=$(grep FQDN installer.tmpl | awk -F\= '{ print $2 }')
domain=$(grep DOMAIN installer.tmpl | awk -F\= '{ print $2 }')
postconf -e myhostname="${hostname}"
postconf -e mydomain="${domain}"
# do we need to add a relayhost?
# BUGFIX: the old check counted lines of `echo $relayhost | wc -l`, which is
# always >= 1 (echo emits a newline even for an empty value), so an empty
# relayhost was configured unconditionally. Only set it when non-empty.
relayhost=$(grep RELAY installer.tmpl | awk -F\= '{ print $2 }')
if [ -n "$relayhost" ]; then
    postconf -e relayhost="$relayhost"
fi
# the calling script needs to restart postfix after this returns
|
einheit/mailguard_install_support
|
postfix-setup.sh
|
Shell
|
gpl-2.0
| 811 |
#!/bin/bash
# (c) 2014-2015 Sam Nazarko
# [email protected]
#
# BUGFIX: the shebang must be the very first line of the file to take
# effect; it previously appeared below the copyright header.
. ../common.sh
VERSION="1.28"
pull_source "https://www.kernel.org/pub/linux/network/connman/connman-${VERSION}.tar.gz" "$(pwd)/src"
if [ $? != 0 ]; then echo -e "Error fetching connman source" && exit 1; fi
# Build in native environment
build_in_env "${1}" $(pwd) "connman-osmc"
if [ $? == 0 ]
then
	echo -e "Building package connman"
	out=$(pwd)/files
	make clean
	update_sources
	handle_dep "xtables-addons-source"
	handle_dep "libreadline-dev"
	handle_dep "libdbus-1-dev"
	handle_dep "wpasupplicant"
	handle_dep "iptables"
	handle_dep "libgnutls28-dev"
	handle_dep "libglib2.0-dev"
	# rewrite the package name so it is prefixed with the target platform
	sed '/Package/d' -i files/DEBIAN/control
	echo "Package: ${1}-connman-osmc" >> files/DEBIAN/control
	pushd src/connman-$VERSION
	install_patch "../../patches" "all"
	./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var
	if [ $? != 0 ]; then echo -e "Configure failed!" && umount /proc/ > /dev/null 2>&1 && exit 1; fi
	$BUILD
	if [ $? != 0 ]; then echo -e "Build failed!" && exit 1; fi
	make install DESTDIR=${out}
	mkdir -p ${out}/etc/dbus-1/system.d
	mkdir -p ${out}/usr/share/polkit-1/actions
	# make sure the target dir for connmanctl exists ('make install' is not
	# guaranteed to create usr/sbin in the staging tree)
	mkdir -p ${out}/usr/sbin
	# NOTE(review): these source paths are relative to the pushd'd source
	# tree; confirm src/connman-dbus-osmc.conf is provided by the patches
	cp -ar src/connman-dbus-osmc.conf ${out}/etc/dbus-1/system.d/connman-dbus.conf
	cp -ar plugins/polkit.policy ${out}/usr/share/polkit-1/actions/net.connman.policy
	cp -ar client/connmanctl ${out}/usr/sbin/connmanctl
	popd
	strip_files "${out}"
	fix_arch_ctl "files/DEBIAN/control"
	dpkg -b files/ connman-osmc.deb
fi
teardown_env "${1}"
|
indie1982/osmc-fixes
|
package/connman-osmc/build.sh
|
Shell
|
gpl-2.0
| 1,500 |
#!/bin/bash
# Use this script to recover from your bricked abox_edge device
set -e
# host tools required by this script; both must be on $PATH
GETOPT=$(which getopt)
UNZIP=$(which unzip)
# flash configuration, the images that need sparse-format conversion,
# and the converter/flasher binaries expected next to this script
FLASH_CONFIG="IAP140_Trusted_lpddr3_1g_discrete_667Mhz_Brillo.blf"
SPARSE_IMAGES="system.img cache.img userdata.img teesst.img"
SPARSE_TOOL="sparse_converter"
FLASH_TOOL="swdl_linux"
LOCAL_DIR=$(pwd)
# Print usage information to stdout and terminate with a non-zero status.
function print_help()
{
cat << EOF
Usage: ${0##*/} [options]
 -h, --help display this help
 -f, --file <filename> zip file for flashing
EOF
exit 1
}
#######################################
# Unpack a firmware zip, convert the sparse images, and flash the device.
# Globals:   LOCAL_DIR, FLASH_CONFIG, SPARSE_TOOL, FLASH_TOOL,
#            SPARSE_IMAGES, UNZIP (all read)
# Arguments: $1 - path of the zip file containing the firmware images
# Returns:   exits 1 on any error; leaves the temp dir behind on flash
#            failure so the logs can be inspected
#######################################
function do_flashing()
{
    local zip_file="$1"
    local img=""
    local blf_file="${LOCAL_DIR}/${FLASH_CONFIG}"
    local sparse_tool="${LOCAL_DIR}/${SPARSE_TOOL}"
    local flash_tool="${LOCAL_DIR}/${FLASH_TOOL}"
    if [ ! -e "$blf_file" ]; then
        echo "BLF file ${blf_file} not exist..."
        exit 1
    fi
    local tmp_dir
    tmp_dir=$(mktemp -dt "abox.XXXXXX")
    # 'set -e' is active, so failures are caught with 'if !' — the old
    # '$?' checks could never run because the script aborted first
    if ! ${UNZIP} -o -q -d "${tmp_dir}" "${zip_file}"; then
        rm -rf "${tmp_dir}"
        echo "unzip ${zip_file} error..."
        # BUGFIX: the old code fell through here without exiting
        exit 1
    fi
    cp -ab "${blf_file}" "${tmp_dir}"
    cp -ab "${sparse_tool}" "${tmp_dir}"
    cp -ab "${flash_tool}" "${tmp_dir}"
    cd "${tmp_dir}"
    # Convert the images to be compatible with the flash tool.
    # SPARSE_IMAGES is deliberately unquoted: it is a space-separated list.
    mkdir -p sparse
    mv ${SPARSE_IMAGES} sparse/
    for img in ${SPARSE_IMAGES}; do
        if ! ./${SPARSE_TOOL} ./sparse/${img} ${img} > /dev/null 2>&1; then
            echo "Sparse image ${img} convert error..."
            # BUGFIX: was 'rm -rf {tmp_dir}' (missing $) — deleted nothing
            rm -rf "${tmp_dir}"
            exit 1
        fi
    done
    # Start flashing
    if ! sudo ./${FLASH_TOOL} -D ${FLASH_CONFIG} -S; then
        echo "Flash error, refer to ${tmp_dir} for error logs..."
    else
        echo "Flash success..."
        rm -rf "${tmp_dir}"
    fi
}
# Minimal hand-rolled option handling: only the first argument is examined.
if [ $# -lt 1 ]; then
print_help
fi
case "$1" in
-h|--help)
print_help
;;
-f|--file)
# -f requires the zip file name as the following argument
if [ $# -lt 2 ]; then
print_help
fi
do_flashing "$2"
;;
*)
print_help
;;
esac
exit 0
|
triplekiller/algo
|
shell/brillo-flashall-abox_edge.sh
|
Shell
|
gpl-2.0
| 1,769 |
#!/bin/bash
# Auto-generated Pegasus-lite job wrapper: sets up a work dir, stages the
# worker package, and runs one workflow task under pegasus-kickstart.
set -e
# version/check metadata consumed by pegasus-lite-common.sh
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x example_workflow-averageratioevent_0-1.0
echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
# set +e so the kickstart exit code is captured instead of aborting
set +e
pegasus-kickstart -n example_workflow::averageratioevent_0:1.0 -N ID0000024 -R condorpool -L example_workflow -T 2016-11-07T21:00:01+00:00 ./example_workflow-averageratioevent_0-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 1A/logs/w-11_1/20161107T210002+0000/00/00/averageratioevent_0_ID0000024.sh
|
Shell
|
gpl-3.0
| 1,261 |
#!/bin/bash
# Just a little script that I use to download and update the /etc/hosts file
# It started as a 5 line script but it evolved. Anyway it was fun !
# Run this script with administrative privilege

# Get the hosts.txt file
cd /etc
echo "Changing directory to /etc"
# Making sure the PWD is /etc
if [ "$PWD" != /etc ]
then echo "Make sure you have administrative rights"
     exit
else
    echo "Checking if file hosts.txt already exists"
    if [ -e "$PWD/hosts.txt" ]
    then echo "File hosts.txt already exists. Please rename it, move it or delete it."
        read -p "Would you like to automaticaly rename it under hosts.txt-bkp-$(date +%y%m%d) name? Y/N
> " auto_ren # puts the answer on a new line
        case $auto_ren in
            n|N|[nN][oO])
                echo "Exiting"
                exit
                ;;
            y|Y|[yY][eE][sS])
                echo "Renaming file"
                new_hosts=$PWD/hosts.txt-bkp-$(date +%y%m%d)
                mv "$PWD/hosts.txt" "$new_hosts"
                if [ -e "$new_hosts" ]
                then echo "Backup Complete"
                # BUGFIX: the old branch had a 'sleep 3' after 'exit',
                # which could never run
                else echo "Backup Failed! Exiting"; exit
                fi
                ;;
            *)
                echo "Exiting"; exit
                ;;
        esac
    fi
    echo "Starting download..."
    sleep 3
    echo "Downloading the hosts.txt from https://winhelp2002.mvps.org/hosts.txt"
    # BUGFIX: fetch over https as announced (the old command used http)
    wget https://winhelp2002.mvps.org/hosts.txt
    if [ -e "$PWD/hosts.txt" ]
    then
        echo "File downloaded succesfully"
        #Backup the old hosts file
        echo "Making a backup of the old $PWD/hosts"
        sleep 3
        old_hosts=hosts.OLD_$(date +%y%m%d)
        cp /etc/hosts "/etc/$old_hosts"
        if [ -e "$PWD/$old_hosts" ]
        then echo "Backup complete"; sleep 1
        else echo "Backup Failed! Removing downloaded file."
             rm "$PWD/hosts.txt"
             echo "Exiting now."; exit
        fi
        #Add the contents of the downloaded hosts.txt file to /etc/hosts
        echo "Appending the content of the newly downloaded hosts.txt to the $PWD/hosts file"
        sleep 1
        cat /etc/hosts.txt >> /etc/hosts
        # BUGFIX: 'cat /etc/hosts | tr ... > /etc/hosts' truncated the file
        # before it could be read; strip CRs through a temp file instead
        tr -d '\r' < /etc/hosts > /etc/hosts.$$ && mv /etc/hosts.$$ /etc/hosts
        echo "Done!"; sleep 2
        echo "All done! Exiting now."; exit
    else
        echo "Make sure you have administrative rights"; exit
    fi
fi
|
dracgol/shell-scripts
|
get&install-hosts.sh
|
Shell
|
gpl-3.0
| 2,430 |
#!/bin/bash
# T&M Hansson IT AB © - 2021, https://www.hanssonit.se/
# GNU General Public License v3.0
# https://github.com/nextcloud/vm/blob/master/LICENSE
#########
IRed='\e[0;91m' # Red
IGreen='\e[0;92m' # Green
ICyan='\e[0;96m' # Cyan
Color_Off='\e[0m' # Text Reset
# Print a message in the given terminal color, resetting the color after.
# $1 - color escape sequence (e.g. $ICyan); $2 - message text
print_text_in_color() {
	local color="$1"
	local message="$2"
	printf '%b%s%b\n' "$color" "$message" "$Color_Off"
}
print_text_in_color "$ICyan" "Fetching all the variables from lib.sh..."
# Block until no process matching $1 is running, polling pgrep every 10s.
# Uses print_text_in_color and $ICyan defined above.
is_process_running() {
PROCESS="$1"
while :
do
RESULT=$(pgrep "${PROCESS}")
# an empty pgrep result is mapped to the sentinel string "null"
if [ "${RESULT:-null}" = null ]; then
break
else
print_text_in_color "$ICyan" "${PROCESS} is running, waiting for it to stop..."
sleep 10
fi
done
}
#########
# Check if dpkg or apt is running
is_process_running apt
is_process_running dpkg
true
SCRIPT_NAME="Nextcloud Startup Script"
# shellcheck source=lib.sh
source /var/scripts/fetch_lib.sh
# Get all needed variables from the library
ncdb
# Check if root
root_check
# Create a snapshot before modifying anything
check_free_space
if does_snapshot_exist "NcVM-installation" || [ "$FREE_SPACE" -ge 50 ]
then
if does_snapshot_exist "NcVM-installation"
then
check_command lvremove /dev/ubuntu-vg/NcVM-installation -y
fi
if ! lvcreate --size 5G --snapshot --name "NcVM-startup" /dev/ubuntu-vg/ubuntu-lv
then
msg_box "The creation of a snapshot failed.
If you just merged and old one, please reboot your server once more.
It should work afterwards again."
exit 1
fi
fi
# Check network
if network_ok
then
print_text_in_color "$IGreen" "Online!"
else
print_text_in_color "$ICyan" "Setting correct interface..."
[ -z "$IFACE" ] && IFACE=$(lshw -c network | grep "logical name" | awk '{print $3; exit}')
# Set correct interface
cat <<-SETDHCP > "/etc/netplan/01-netcfg.yaml"
network:
version: 2
renderer: networkd
ethernets:
$IFACE:
dhcp4: true
dhcp6: true
SETDHCP
check_command netplan apply
print_text_in_color "$ICyan" "Checking connection..."
sleep 1
set_systemd_resolved_dns "$IFACE"
if ! nslookup github.com
then
msg_box "The script failed to get an address from DHCP.
You must have a working network connection to run this script.
You will now be provided with the option to set a static IP manually instead."
# Run static_ip script
bash /var/scripts/static_ip.sh
fi
fi
# Check network again
if network_ok
then
print_text_in_color "$IGreen" "Online!"
elif home_sme_server
then
msg_box "It seems like the last try failed as well using LAN ethernet.
Since the Home/SME server is equipped with a Wi-Fi module, you will now be asked to enable it to get connectivity.
Please note: It's not recommended to run a server on Wi-Fi; using an ethernet cable is always the best."
if yesno_box_yes "Do you want to enable Wi-Fi on this server?"
then
install_if_not network-manager
nmtui
fi
if network_ok
then
print_text_in_color "$IGreen" "Online!"
else
msg_box "Network is NOT OK. You must have a working network connection to run this script.
Please contact us for support:
https://shop.hanssonit.se/product/premium-support-per-30-minutes/
Please also post this issue on: https://github.com/nextcloud/vm/issues"
exit 1
fi
else
msg_box "Network is NOT OK. You must have a working network connection to run this script.
Please contact us for support:
https://shop.hanssonit.se/product/premium-support-per-30-minutes/
Please also post this issue on: https://github.com/nextcloud/vm/issues"
exit 1
fi
# Check that this run on the PostgreSQL VM
if ! is_this_installed postgresql-common
then
print_text_in_color "$IRed" "This script is intended to be \
run using a PostgreSQL database, but PostgreSQL is not installed."
print_text_in_color "$IRed" "Aborting..."
exit 1
fi
# Run the startup menu
run_script MENU startup_configuration
true
SCRIPT_NAME="Nextcloud Startup Script"
# shellcheck source=lib.sh
source /var/scripts/fetch_lib.sh
# Get all needed variables from the library
ncdb
nc_update
# Check for errors + debug code and abort if something isn't right
# 1 = ON
# 0 = OFF
DEBUG=0
debug_mode
# Nextcloud 21 is required
lowest_compatible_nc 21
# Add temporary fix if needed
if network_ok
then
run_script STATIC temporary-fix-begining
fi
# Import if missing and export again to import it with UUID
zpool_import_if_missing
# Set phone region (needs the latest KEYBOARD_LAYOUT from lib)
# shellcheck source=lib.sh
source /var/scripts/fetch_lib.sh
if [ -n "$KEYBOARD_LAYOUT" ]
then
nextcloud_occ config:system:set default_phone_region --value="$KEYBOARD_LAYOUT"
fi
# Is this run as a pure root user?
if is_root
then
if [[ "$UNIXUSER" == "ncadmin" ]]
then
sleep 1
else
if [ -z "$UNIXUSER" ]
then
msg_box "You seem to be running this as the root user.
You must run this as a regular user with sudo permissions.
Please create a user with sudo permissions and the run this command:
sudo -u [user-with-sudo-permissions] sudo bash /var/scripts/nextcloud-startup-script.sh
We will do this for you when you hit OK."
download_script STATIC adduser
bash $SCRIPTS/adduser.sh "$SCRIPTS/nextcloud-startup-script.sh"
rm $SCRIPTS/adduser.sh
else
msg_box "You probably see this message if the user 'ncadmin' does not exist on the system,
which could be the case if you are running directly from the scripts on Github and not the VM.
As long as the user you created have sudo permissions it's safe to continue.
This would be the case if you created a new user with the script in the previous step.
If the user you are running this script with is a user that doesn't have sudo permissions,
please abort this script and report this issue to $ISSUES."
if yesno_box_yes "Do you want to abort this script?"
then
exit
fi
fi
fi
fi
######## The first setup is OK to run to this point several times, but not any further ########
# Run-once guard: a marker file blocks re-running the rest of the setup;
# it is created just below and removed again right before the final reboot.
if [ -f "$SCRIPTS/you-can-not-run-the-startup-script-several-times" ]
then
msg_box "The $SCRIPT_NAME script that handles this first setup \
is designed to be run once, not several times in a row.
If you feel uncertain about adding some extra features during this setup, \
then it's best to wait until after the first setup is done. You can always add all the extra features later.
[For the Nextcloud VM:]
Please delete this VM from your host and reimport it once again, then run this setup like you did the first time.
[For the Nextcloud Home/SME Server:]
It's a bit trickier since you can't revert in the same way as a VM. \
The best thing you can do now is to save all the output from the session you \
ran before this one + write down all the steps you took and send and email to:
[email protected] with the subject 'Issues with first setup', and we'll take it from there.
Full documentation can be found here: https://docs.hanssonit.se
Please report any bugs you find here: $ISSUES"
exit 1
fi
touch "$SCRIPTS/you-can-not-run-the-startup-script-several-times"
# Install the welcome landing page: a different index.php for the
# Home/SME Server flavor vs the plain VM; the stock html index is removed.
if home_sme_server
then
download_script STATIC nhss_index
mv $SCRIPTS/nhss_index.php $HTML/index.php && rm -f $HTML/html/index.html
chmod 750 $HTML/index.php && chown www-data:www-data $HTML/index.php
else
download_script STATIC index
mv $SCRIPTS/index.php $HTML/index.php && rm -f $HTML/html/index.html
chmod 750 $HTML/index.php && chown www-data:www-data $HTML/index.php
fi
# Change 000-default to $WEB_ROOT
sed -i "s|DocumentRoot /var/www/html|DocumentRoot $HTML|g" /etc/apache2/sites-available/000-default.conf
# Make possible to see the welcome screen (without this php-fpm won't reach it)
# The seven sed inserts below splice a <FilesMatch "\.php$"> / SetHandler stanza
# into lines 14-20 of 000-default.conf so .php requests are proxied to the
# PHP-FPM unix socket for the current $PHPVER.
sed -i '14i\ # http://lost.l-w.ca/0x05/apache-mod_proxy_fcgi-and-php-fpm/' /etc/apache2/sites-available/000-default.conf
sed -i '15i\ <FilesMatch "\.php$">' /etc/apache2/sites-available/000-default.conf
sed -i '16i\ <If "-f %{SCRIPT_FILENAME}">' /etc/apache2/sites-available/000-default.conf
sed -i '17i\ SetHandler "proxy:unix:/run/php/php'$PHPVER'-fpm.nextcloud.sock|fcgi://localhost"' /etc/apache2/sites-available/000-default.conf
sed -i '18i\ </If>' /etc/apache2/sites-available/000-default.conf
sed -i '19i\ </FilesMatch>' /etc/apache2/sites-available/000-default.conf
sed -i '20i\ ' /etc/apache2/sites-available/000-default.conf
# Allow $UNIXUSER to run figlet script
chown "$UNIXUSER":"$UNIXUSER" "$SCRIPTS/nextcloud.sh"
# Three informational dialogs shown before the interactive part of the setup
# begins: what the script will do, housekeeping notes, and a do-not-abort warning.
msg_box "This script will configure your Nextcloud and activate TLS.
It will also do the following:
- Generate new SSH keys for the server
- Generate new PostgreSQL password
- Install selected apps and automatically configure them
- Detect and set hostname
- Detect and set trusted domains
- Upgrade your system and Nextcloud to latest version
- Set secure permissions to Nextcloud
- Set new passwords to Linux and Nextcloud
- Change timezone
- Set correct Rewriterules for Nextcloud
- Copy content from .htaccess to .user.ini (because we use php-fpm)
- Add additional options if you choose them
- And more..."
msg_box "PLEASE NOTE:
[#] Please finish the whole setup. The server will reboot once done.
[#] Please read the on-screen instructions carefully, they will guide you through the setup.
[#] When complete it will delete all the *.sh, *.html, *.tar, *.zip inside:
/root
/home/$UNIXUSER
[#] Please consider donating if you like the product:
https://shop.hanssonit.se/product-category/donate/
[#] You can also ask for help here:
https://help.nextcloud.com/c/support/appliances-docker-snappy-vm
https://shop.hanssonit.se/product/premium-support-per-30-minutes/"
msg_box "PLEASE NOTE:
The first setup is meant to be run once, and not aborted.
If you feel uncertain about the options during the setup, just choose the defaults by hitting [ENTER] at each question.
When the setup is done, the server will automatically reboot.
Please report any issues to: $ISSUES"
# Change timezone in PHP
sed -i "s|;date.timezone.*|date.timezone = $(cat /etc/timezone)|g" "$PHP_INI"
# Change timezone for logging
nextcloud_occ config:system:set logtimezone --value="$(cat /etc/timezone)"
# Pretty URLs
print_text_in_color "$ICyan" "Setting RewriteBase to \"/\" in config.php..."
chown -R www-data:www-data $NCPATH
nextcloud_occ config:system:set overwrite.cli.url --value="http://localhost/"
nextcloud_occ config:system:set htaccess.RewriteBase --value="/"
nextcloud_occ maintenance:update:htaccess
# $SECURE resets Nextcloud file permissions; runs in background with a spinner.
bash $SECURE & spinner_loading
# Generate new SSH Keys
# Removing the host keys and reconfiguring openssh-server regenerates them,
# so every imported VM image gets unique keys.
printf "\nGenerating new SSH keys for the server...\n"
rm -v /etc/ssh/ssh_host_*
dpkg-reconfigure openssh-server
# Generate new PostgreSQL password
print_text_in_color "$ICyan" "Generating new PostgreSQL password..."
check_command bash "$SCRIPTS/change_db_pass.sh"
sleep 3
# Server configurations
bash $SCRIPTS/server_configuration.sh
# Nextcloud configuration
bash $SCRIPTS/nextcloud_configuration.sh
# Install apps
bash $SCRIPTS/additional_apps.sh
### Change passwords
# CLI USER
msg_box "For better security, we will now change the password for the CLI user in Ubuntu."
# Re-derive the CLI user as the first member of the 'sudo' group.
UNIXUSER="$(getent group sudo | cut -d: -f4 | cut -d, -f1)"
# Prompt until a password without spaces is entered.
while :
do
UNIX_PASSWORD=$(input_box_flow "Please type in the new password for the current CLI user in Ubuntu: $UNIXUSER.")
if [[ "$UNIX_PASSWORD" == *" "* ]]
then
msg_box "Please don't use spaces."
else
break
fi
done
# NOTE(review): the 'if' tests the whole pipeline, i.e. the exit status of
# 'sudo chpasswd' — check_command only wraps the echo. Presumably intentional.
if check_command echo "$UNIXUSER:$UNIX_PASSWORD" | sudo chpasswd
then
msg_box "The new password for the current CLI user in Ubuntu ($UNIXUSER) is now set to: $UNIX_PASSWORD
This is used when you login to the Ubuntu CLI."
fi
# Don't keep the plaintext password in the environment.
unset UNIX_PASSWORD
# NEXTCLOUD USER
# Current admin username: third column of 'occ user:list' output.
NCADMIN=$(nextcloud_occ user:list | awk '{print $3}')
msg_box "We will now change the username and password for the Web Admin in Nextcloud."
# Prompt until a valid, unused username is entered (no spaces, not the
# current admin, only [A-Za-z0-9_.@-] characters).
while :
do
NEWUSER=$(input_box_flow "Please type in the name of the Web Admin in Nextcloud.
It must differ from the current one: $NCADMIN.\n\nThe only allowed characters for the username are:
'a-z', 'A-Z', '0-9', and '_.@-'")
if [[ "$NEWUSER" == *" "* ]]
then
msg_box "Please don't use spaces."
elif [ "$NEWUSER" = "$NCADMIN" ]
then
msg_box "This username ($NCADMIN) is already in use. Please choose a different one."
# - has to be escaped otherwise it won't work.
# Inspired by: https://unix.stackexchange.com/a/498731/433213
# The expansion deletes all allowed characters; anything left means the
# name contained a disallowed character.
elif [ "${NEWUSER//[A-Za-z0-9_.\-@]}" ]
then
msg_box "Allowed characters for the username are:\na-z', 'A-Z', '0-9', and '_.@-'\n\nPlease try again."
else
break
fi
done
# Ask for the new Web Admin password and create the user with it.
# Loops until 'occ user:add' succeeds.
while :
do
    OC_PASS=$(input_box_flow "Please type in the new password for the new Web Admin ($NEWUSER) in Nextcloud.")
    # Reject passwords containing spaces and re-prompt.
    # Bug fix: previously this only showed the warning and then fell through,
    # creating the user with the space-containing password anyway.
    if [[ "$OC_PASS" == *" "* ]]
    then
        msg_box "Please don't use spaces."
        continue
    fi
    # Create new user
    # The password is passed via the environment (--password-from-env) so it
    # never appears on the command line.
    export OC_PASS
    if su -s /bin/sh www-data -c "php $NCPATH/occ user:add $NEWUSER --password-from-env -g admin"
    then
        msg_box "The new Web Admin in Nextcloud is now: $NEWUSER\nThe password is set to: $OC_PASS
This is used when you login to Nextcloud itself, i.e. on the web."
        unset OC_PASS
        break
    else
        any_key "Press any key to choose a different password."
    fi
done
# Delete old user
# Remove the previous admin account now that the new one exists.
if [[ "$NCADMIN" ]]
then
print_text_in_color "$ICyan" "Deleting $NCADMIN..."
nextcloud_occ user:delete "$NCADMIN"
sleep 2
fi
# We need to unset the cached admin-user since we have changed its name
unset NC_ADMIN_USER
msg_box "Well done, you have now finished most of the setup.
There are still a few steps left but they are automated so sit back and relax! :)"
# Add default notifications
# These show up in the Nextcloud web UI for the admin user.
notify_admin_gui \
"Please set up SMTP" \
"Please remember to set up SMTP to be able to send shared links, user notifications and more via email. \
Please go here and start setting it up: https://your-nextcloud/settings/admin."
notify_admin_gui \
"Do you need support?" \
"If you need support, please visit the shop: https://shop.hanssonit.se, or the forum: https://help.nextcloud.com."
# Explain the deliberate absence of Imagick (security decision upstream).
if ! is_this_installed php"$PHPVER"-imagick
then
notify_admin_gui \
"Regarding Imagick not being installed" \
"As you may have noticed, Imagick is not installed. We care about your security, \
and here's the reason: https://github.com/nextcloud/server/issues/13099"
fi
# Fixes https://github.com/nextcloud/vm/issues/58
a2dismod status
restart_webserver
# PHP-FPM pool sizing: fixed presets for the Home/SME Server hardware,
# otherwise computed from available RAM.
if home_sme_server
then
install_if_not bc
# Total memory in kB from /proc/meminfo, converted to whole GB via bc.
mem_available="$(awk '/MemTotal/{print $2}' /proc/meminfo)"
mem_available_gb="$(echo "scale=0; $mem_available/(1024*1024)" | bc)"
# 32 GB RAM
if [[ 30 -lt "${mem_available_gb}" ]]
then
# Add specific values to PHP-FPM based on 32 GB RAM
check_command sed -i "s|pm.max_children.*|pm.max_children = 600|g" "$PHP_POOL_DIR"/nextcloud.conf
check_command sed -i "s|pm.start_servers.*|pm.start_servers = 100|g" "$PHP_POOL_DIR"/nextcloud.conf
check_command sed -i "s|pm.min_spare_servers.*|pm.min_spare_servers = 20|g" "$PHP_POOL_DIR"/nextcloud.conf
check_command sed -i "s|pm.max_spare_servers.*|pm.max_spare_servers = 480|g" "$PHP_POOL_DIR"/nextcloud.conf
restart_webserver
# 16 GB RAM
elif [[ 14 -lt "${mem_available_gb}" ]]
then
# Add specific values to PHP-FPM based on 16 GB RAM
check_command sed -i "s|pm.max_children.*|pm.max_children = 300|g" "$PHP_POOL_DIR"/nextcloud.conf
check_command sed -i "s|pm.start_servers.*|pm.start_servers = 50|g" "$PHP_POOL_DIR"/nextcloud.conf
check_command sed -i "s|pm.min_spare_servers.*|pm.min_spare_servers = 20|g" "$PHP_POOL_DIR"/nextcloud.conf
check_command sed -i "s|pm.max_spare_servers.*|pm.max_spare_servers = 280|g" "$PHP_POOL_DIR"/nextcloud.conf
restart_webserver
fi
else
# Calculate the values of PHP-FPM based on the amount of RAM available (minimum 2 GB or 8 children)
calculate_php_fpm
# Run again if values are reset on last run
calculate_php_fpm
fi
# Add temporary fix if needed
if network_ok
then
run_script STATIC temporary-fix-end
fi
# Cleanup 1
# Remove setup-only helper scripts and logs so they can't be re-run or leak info.
nextcloud_occ maintenance:repair
rm -f "$SCRIPTS/ip.sh"
rm -f "$SCRIPTS/change_db_pass.sh"
rm -f "$SCRIPTS/instruction.sh"
rm -f "$NCDATA/nextcloud.log"
rm -f "$SCRIPTS/static_ip.sh"
rm -f "$SCRIPTS/lib.sh"
rm -f "$SCRIPTS/server_configuration.sh"
rm -f "$SCRIPTS/nextcloud_configuration.sh"
rm -f "$SCRIPTS/additional_apps.sh"
rm -f "$SCRIPTS/adduser.sh"
rm -f "$SCRIPTS/activate-tls.sh"
rm -f "$SCRIPTS/desec_menu.sh"
rm -f "$NCDATA"/*.log
# Sweep installer leftovers from the home directories and Nextcloud itself.
find /root "/home/$UNIXUSER" -type f \( -name '*.sh*' -o -name '*.html*' -o -name '*.tar*' -o -name 'results' -o -name '*.zip*' \) -delete
find "$NCPATH" -type f \( -name 'results' -o -name '*.sh*' \) -delete
sed -i "s|instruction.sh|nextcloud.sh|g" "/home/$UNIXUSER/.bash_profile"
# Empty shell histories, local mail spools and web server logs.
truncate -s 0 \
/root/.bash_history \
"/home/$UNIXUSER/.bash_history" \
/var/spool/mail/root \
"/var/spool/mail/$UNIXUSER" \
/var/log/apache2/access.log \
/var/log/apache2/error.log \
"$VMLOGS/nextcloud.log"
sed -i "s|sudo -i||g" "$UNIXUSER_PROFILE"
# Write a fresh ~/.profile for root: source .bashrc, auto-run the startup
# script while it still exists, run history.sh, and disable terminal write
# access (mesg n). The heredoc delimiter is unquoted, but the body contains
# no expansions, so the file is written literally.
cat << ROOTNEWPROFILE > "$ROOT_PROFILE"
# ~/.profile: executed by Bourne-compatible login shells.
if [ "/bin/bash" ]
then
if [ -f ~/.bashrc ]
then
. ~/.bashrc
fi
fi
if [ -x /var/scripts/nextcloud-startup-script.sh ]
then
/var/scripts/nextcloud-startup-script.sh
fi
if [ -x /var/scripts/history.sh ]
then
/var/scripts/history.sh
fi
mesg n
ROOTNEWPROFILE
# Set trusted domain in config.php
run_script NETWORK trusted
# Upgrade system
print_text_in_color "$ICyan" "System will now upgrade..."
bash $SCRIPTS/update.sh minor
# Cleanup 2
apt-get autoremove -y
apt-get autoclean
# Remove preference for IPv4
rm -f /etc/apt/apt.conf.d/99force-ipv4
apt-get update
# Success!
msg_box "The installation process is *almost* done.
Please hit OK in all the following prompts and let the server reboot to complete the installation process."
msg_box "TIPS & TRICKS:
1. Publish your server online: https://goo.gl/iUGE2U
2. To login to PostgreSQL just type: sudo -u postgres psql nextcloud_db
3. To update this server just type: sudo bash /var/scripts/update.sh
4. Install apps, configure Nextcloud, and server: sudo bash $SCRIPTS/menu.sh"
msg_box "SUPPORT:
Please ask for help in the forums, visit our shop to buy support,
or buy a yearly subscription from Nextcloud:
- SUPPORT: https://shop.hanssonit.se/product/premium-support-per-30-minutes/
- FORUM: https://help.nextcloud.com/
- SUBSCRIPTION: https://shop.hanssonit.se/product/nextcloud-enterprise-license-100-users/
BUGS:
Please report any bugs here: https://github.com/nextcloud/vm/issues"
msg_box "### PLEASE HIT OK TO REBOOT ###
Congratulations! You have successfully installed Nextcloud!
LOGIN:
Login to Nextcloud in your browser:
- IP: $ADDRESS
- Hostname: $(hostname -f)
### PLEASE HIT OK TO REBOOT ###"
# Reboot
# Remove the run-once marker and the startup script itself, then reboot.
print_text_in_color "$IGreen" "Installation done, system will now reboot..."
check_command rm -f "$SCRIPTS/you-can-not-run-the-startup-script-several-times"
check_command rm -f "$SCRIPTS/nextcloud-startup-script.sh"
reboot
|
nextcloud/vm
|
nextcloud-startup-script.sh
|
Shell
|
gpl-3.0
| 19,310 |
# Restart supervisord-managed services, then idle forever so this process
# stays alive (container entrypoint keep-alive pattern).
service supervisor restart
while true; do sleep 10; done
|
IGPla/systembuilder
|
templates/djangocelery/prod/docker/web/startup.sh
|
Shell
|
gpl-3.0
| 57 |
# Run the jopy crawler once per target city, in the same order as before.
for city in 北京 上海 杭州 广州 哈尔滨 成都 郑州 深圳 济南 天津
do
    python jopy.py "$city"
done
|
qidouhai/jobla
|
jopy/run.sh
|
Shell
|
gpl-3.0
| 223 |
#!/bin/bash
#
# daemontest.sh: this file is part of the elfix package
# Copyright (C) 2011, 2012 Anthony G. Basile
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Exhaustively sets every combination of PaX flags (P/E/M/R/S, on/off/unset)
# on ./daemon with paxctl-ng, starts the daemon via ./init.sh, and compares
# the flags the kernel reports in /proc/<pid>/status against what was set.
# Exit status is the number of mismatches.
#
# dotest = 0 -> do only XATTR_PAX or PT_PAX test
# dotest = 1 -> do both
dotest=${1-0}
verbose=${2-0}
unamem=$(uname -m)
PWD=$(pwd)
INITSH="${PWD}"/init.sh
DAEMON="${PWD}"/daemon
PIDFILE="${PWD}"/daemon.pid
PAXCTLNG="../../src/paxctl-ng"
# NOTE(review): '2>&1 1>/dev/null' keeps stderr on the terminal and discards
# only stdout; if the intent was to discard both it should be '>/dev/null 2>&1'.
# Left as-is to preserve behavior.
${PAXCTLNG} -cv ${DAEMON} 2>&1 1>/dev/null
count=0
echo "================================================================================"
echo
echo " RUNNING DAEMON TEST"
echo
echo " NOTE:"
echo " 1) This test is only for amd64 and i686"
echo " 2) This test will fail on amd64 unless the following are enabled in the kernel:"
echo " CONFIG_PAX_PAGEEXEC"
echo " CONFIG_PAX_EMUTRAMP"
echo " CONFIG_PAX_MPROTECT"
echo " CONFIG_PAX_RANDMMAP"
echo " 3) This test will fail on i686 unless the following are enabled in the kernel:"
echo " CONFIG_PAX_EMUTRAMP"
echo " CONFIG_PAX_MPROTECT"
echo " CONFIG_PAX_RANDMMAP"
echo " CONFIG_PAX_SEGMEXEC"
echo
if [[ "$unamem" != "i686" && "$unamem" != "x86_64" ]]; then
    echo "This test is only for i686 or x86_64"
    echo
    echo "================================================================================"
    exit 0
fi
dots=0
# Iterate every on/off/unset combination of the five PaX flags.
for pf in "p" "P" "-"; do
    for ef in "e" "E" "-"; do
        for mf in "m" "M" "-"; do
            for rf in "r" "R" "-"; do
                for sf in "s" "S" "-"; do
                    pflags="${pf}${ef}${mf}${rf}${sf}"
                    if [[ "${verbose}" != 0 ]] ;then
                        echo "SET TO :" ${pflags}
                    fi
                    # '-' means "unset": paxctl-ng wants both letters (e.g. 'Pp') to clear a flag.
                    flags="${pf/-/Pp}${ef/-/Ee}${mf/-/Mm}${rf/-/Rr}${sf/-/Ss}"
                    ${PAXCTLNG} -"${flags}" ${DAEMON} >/dev/null 2>&1
                    if [[ "${verbose}" != 0 ]] ;then
                        sflags=$(${PAXCTLNG} -v ${DAEMON})
                        if [[ "${dotest}" = "0" ]]; then
                            sflags=$(echo ${sflags} | awk '{print $3}')
                            echo "GOT :" ${sflags}
                        else
                            ptsflags=$(echo ${sflags} | awk '{print $3}')
                            xtsflags=$(echo ${sflags} | awk '{print $5}')
                            echo "PT_PAX :" ${ptsflags}
                            echo "XATTR_PAX :" ${xtsflags}
                        fi
                    fi
                    # Start the daemon and read the PaX flags the kernel applied.
                    ${INITSH} start
                    if [[ -f "${PIDFILE}" ]]
                    then
                        rflags=$(cat /proc/$(cat ${PIDFILE})/status | grep ^PaX | awk '{ print $2 }')
                        if [[ "${verbose}" != 0 ]] ;then
                            echo "RUNNING: "${rflags}
                        fi
                        ${INITSH} stop
                    else
                        if [[ "${verbose}" != 0 ]] ;then
                            echo "RUNNING: no daemon"
                        fi
                        rflags="-----"
                    fi
                    # Only compare the flags this architecture actually supports.
                    if [[ "$unamem" = "i686" ]]; then
                        # Skip i = 0 which is P which is not set on i686
                        list="1 2 3 4"
                    else
                        # Skip i = 4 which is S which is not set on amd64
                        list="0 1 2 3"
                    fi
                    for i in $list; do
                        p=${pflags:$i:1}
                        r=${rflags:$i:1}
                        if [[ $p != "-" ]]; then
                            if [[ $p != $r && $r != "-" ]]; then
                                (( count = count + 1 ))
                                echo "Mismatch: ${pflags} ${rflags}"
                            fi
                        fi
                    done
                    # Progress dots (80 per line) when not verbose.
                    if [[ "${verbose}" != 0 ]] ;then
                        echo
                    else
                        (( dots = dots + 1 ))
                        echo -n "."
                        if [[ "$dots" = "80" ]]; then
                            dots=0
                            echo
                        fi
                    fi
                done
            done
        done
    done
done
if [[ "${verbose}" = 0 ]] ;then
    echo
    echo
fi
echo " Mismatches = ${count}"
echo
echo "================================================================================"
exit $count
|
gentoo/elfix
|
tests/pxtpax/daemontest.sh
|
Shell
|
gpl-3.0
| 4,365 |
#!/bin/bash
# T&M Hansson IT AB © - 2021, https://www.hanssonit.se/
# Copyright © 2021 Simon Lindner (https://github.com/szaimen)
true
SCRIPT_NAME="NTFS Mount"
SCRIPT_EXPLAINER="This script automates mounting NTFS drives locally in your system."
# shellcheck source=lib.sh
# Load the shared library (local copy first, fall back to GitHub master).
source /var/scripts/fetch_lib.sh || source <(curl -sL https://raw.githubusercontent.com/nextcloud/vm/master/lib.sh)
# Check for errors + debug code and abort if something isn't right
# 1 = ON
# 0 = OFF
DEBUG=0
debug_mode
# Check if root
root_check
# Show explainer
msg_box "$SCRIPT_EXPLAINER"
# Mount drive
# mount_drive: interactively detect a newly attached NTFS drive, let the user
# pick a partition and a mountpoint under /mnt/, add it to /etc/fstab and
# mount it. Optionally reconfigures a dockerized Plex to see the mount, or
# converts the entry to a root-only, noauto backup mount.
# Returns 1 on any user cancellation or detection failure.
mount_drive() {
local UUIDS
local UUID
local LABEL
msg_box "Please disconnect your drive for now and connect it again AFTER you hit OK.
Otherwise we will not be able to detect it."
# Snapshot the currently attached disks, then poll for up to 60 s for a new one.
CURRENT_DRIVES=$(lsblk -o KNAME,TYPE | grep disk | awk '{print $1}')
count=0
while [ "$count" -lt 60 ]
do
print_text_in_color "$ICyan" "Please connect your drive now."
sleep 5 & spinner_loading
echo ""
NEW_DRIVES=$(lsblk -o KNAME,TYPE | grep disk | awk '{print $1}')
if [ "$CURRENT_DRIVES" = "$NEW_DRIVES" ]
then
count=$((count+5))
else
msg_box "A new drive was found. We will continue with the mounting now.
Please leave it connected."
break
fi
done
# Exit if no new drive was found
if [ "$count" -ge 60 ]
then
msg_box "No new drive found within 60 seconds.
Please run this option again if you want to try again."
return 1
fi
# Wait until the drive has spin up
countdown "Waiting for the drive to spin up..." 15
# Get all new drives
# Subtract the pre-existing disks so NEW_DRIVES holds only the new device(s).
mapfile -t CURRENT_DRIVES <<< "$CURRENT_DRIVES"
for drive in "${CURRENT_DRIVES[@]}"
do
NEW_DRIVES=$(echo "$NEW_DRIVES" | grep -v "^$drive$")
done
# Partition menu
args=(whiptail --title "$TITLE" --menu \
"Please select the partition that you would like to mount.
$MENU_GUIDE" "$WT_HEIGHT" "$WT_WIDTH" 4)
# Get information that are important to show the partition menu
# For every NTFS partition with a UUID that is not yet in fstab, add a menu entry.
mapfile -t NEW_DRIVES <<< "$NEW_DRIVES"
for drive in "${NEW_DRIVES[@]}"
do
DRIVE_DESCRIPTION=$(lsblk -o NAME,VENDOR,MODEL | grep "^$drive" | awk '{print $2, $3}')
PARTITION_STATS=$(lsblk -o KNAME,FSTYPE,SIZE,UUID,LABEL | grep "^$drive" | grep -v "^$drive ")
unset PARTITIONS
mapfile -t PARTITIONS <<< "$(echo "$PARTITION_STATS" | awk '{print $1}')"
for partition in "${PARTITIONS[@]}"
do
STATS=$(echo "$PARTITION_STATS" | grep "^$partition ")
FSTYPE=$(echo "$STATS" | awk '{print $2}')
if [ "$FSTYPE" != "ntfs" ]
then
continue
fi
SIZE=$(echo "$STATS" | awk '{print $3}')
UUID=$(echo "$STATS" | awk '{print $4}')
if [ -z "$UUID" ]
then
continue
fi
# Join multi-word labels with underscores, then strip trailing underscores.
LABEL=$(echo "$STATS" | awk '{print $5,$6,$7,$8,$9,$10,$11,$12}' | sed 's| |_|g' | sed -r 's|[_]+$||')
if ! grep -q "$UUID" /etc/fstab
then
args+=("$UUID" "$LABEL $DRIVE_DESCRIPTION $SIZE $FSTYPE")
UUIDS+="$UUID"
else
msg_box "The partition
$UUID $LABEL $DRIVE_DESCRIPTION $SIZE $FSTYPE
is already existing.\n
If you want to remove it, run the following two commands:
sudo sed -i '/$UUID/d' /etc/fstab
sudo reboot"
fi
done
done
# Check if at least one drive was found
if [ -z "$UUIDS" ]
then
msg_box "No drive found that can get mounted.
Most likely none is NTFS formatted."
return 1
fi
# Show the partition menu
UUID=$("${args[@]}" 3>&1 1>&2 2>&3)
if [ -z "$UUID" ]
then
return 1
fi
# Get the label of the partition
LABEL=$(lsblk -o UUID,LABEL | grep "^$UUID " | awk '{print $2,$3,$4,$5,$6,$7,$8,$9}' | sed 's| |_|g' | sed -r 's|[_]+$||')
if [ -z "$LABEL" ]
then
LABEL="partition-label"
fi
# Create plex user
# The fstab entry below mounts the drive as uid/gid 'plex'.
if ! id plex &>/dev/null
then
check_command adduser --no-create-home --quiet --disabled-login --force-badname --gecos "" "plex"
fi
# Enter the mountpoint
# Validate the path, append the fstab entry, and try to mount; on mount
# failure the fstab entry is rolled back and the user is re-prompted.
while :
do
MOUNT_PATH=$(input_box_flow "Please type in the directory where you want to mount the partition.
One example is: '/mnt/$LABEL'
The directory has to start with '/mnt/'
If you want to cancel, type 'exit' and press [ENTER].")
if [ "$MOUNT_PATH" = "exit" ]
then
exit 1
elif echo "$MOUNT_PATH" | grep -q " "
then
msg_box "Please don't use spaces!"
elif ! echo "$MOUNT_PATH" | grep -q "^/mnt/"
then
msg_box "The directory has to stat with '/mnt/'"
elif grep -q " $MOUNT_PATH " /etc/fstab
then
msg_box "The mountpoint already exists in fstab. Please try a different one."
elif mountpoint -q "$MOUNT_PATH"
then
msg_box "The mountpoint is already mounted. Please try a different one."
elif echo "$MOUNT_PATH" | grep -q "^/mnt/ncdata"
then
msg_box "The directory isn't allowed to start with '/mnt/ncdata'"
elif echo "$MOUNT_PATH" | grep -q "^/mnt/smbshares"
then
msg_box "The directory isn't allowed to start with '/mnt/smbshares'"
else
echo "UUID=$UUID $MOUNT_PATH ntfs-3g \
windows_names,uid=plex,gid=plex,umask=007,nofail 0 0" >> /etc/fstab
mkdir -p "$MOUNT_PATH"
if ! mount "$MOUNT_PATH"
then
msg_box "The mount wasn't successful. Please try again."
sed -i "/$UUID/d" /etc/fstab
else
break
fi
fi
done
# Inform the user
msg_box "Congratulations! The mount was successful.
You can now access the partition here:
$MOUNT_PATH"
# Ask if this is a backup drive
if ! yesno_box_no "Is this drive meant to be a backup drive?
If you choose yes, it will only get mounted by a backup script \
and will restrict the read/write permissions to the root user."
then
# Test if Plex is installed
if is_docker_running && docker ps -a --format "{{.Names}}" | grep -q "^plex$"
then
# Reconfiguring Plex
msg_box "Plex Media Server found. We are now adjusting Plex to be able to use the new drive.
This can take a while. Please be patient!"
print_text_in_color "$ICyan" "Downloading the needed tool to get the current Plex config..."
# 'runlike' dumps the docker run command of the existing plex container;
# a read-only volume for the new mount is spliced in and the container recreated.
docker pull assaflavie/runlike
echo '#/bin/bash' > /tmp/pms-conf
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock assaflavie/runlike -p plex >> /tmp/pms-conf
if ! grep -q "$MOUNT_PATH:$MOUNT_PATH:ro" /tmp/pms-conf
then
MOUNT_PATH_SED="${MOUNT_PATH//\//\\/}"
sed -i "0,/--volume/s// -v $MOUNT_PATH_SED:$MOUNT_PATH_SED:ro \\\\\n&/" /tmp/pms-conf
docker stop plex
if ! docker rm plex
then
msg_box "Something failed while removing the old container."
return
fi
if ! bash /tmp/pms-conf
then
msg_box "Starting the new container failed. You can find the config here: '/tmp/pms-conf'"
return
fi
rm /tmp/pms-conf
msg_box "Plex was adjusted!"
else
rm /tmp/pms-conf
msg_box "No need to update Plex, since the drive is already mounted to Plex."
fi
fi
return
fi
# Execute the change to a backup drive
# Remount later only via backup scripts: root-only perms, noauto.
umount "$MOUNT_PATH"
sed -i "/$UUID/d" /etc/fstab
echo "UUID=$UUID $MOUNT_PATH ntfs-3g windows_names,uid=root,gid=root,umask=177,nofail,noauto 0 0" >> /etc/fstab
msg_box "Your Backup drive is ready."
}
# Show main_menu
# Loop the whiptail menu until the user exits (or cancels, which returns "").
while :
do
choice=$(whiptail --title "$TITLE" --menu \
"Choose what you want to do.
$MENU_GUIDE" "$WT_HEIGHT" "$WT_WIDTH" 4 \
"Mount a drive" "(Interactively mount a NTFS drive)" \
"Exit" "(Exit this script)" 3>&1 1>&2 2>&3)
case "$choice" in
"Mount a drive")
mount_drive
;;
"Exit")
break
;;
"")
break
;;
*)
;;
esac
done
exit
|
nextcloud/vm
|
not-supported/ntfs-mount.sh
|
Shell
|
gpl-3.0
| 7,796 |
#!/bin/sh
# CTDB eventscript unit test: 11.natgw must fail with a clear error when the
# configured CTDB_NATGW_NODES file is missing.
. "${TEST_SCRIPTS_DIR}/unit.sh"
define_test "missing config file"
setup
setup_ctdb_natgw <<EOF
192.168.1.21 leader
192.168.1.22
192.168.1.23
192.168.1.24
EOF
# Remove the nodes file the setup just created to trigger the error path.
rm -f "$CTDB_NATGW_NODES"
required_result 1 <<EOF
error: CTDB_NATGW_NODES=${CTDB_NATGW_NODES} unreadable
EOF
# Both events must produce the same failure.
for i in "startup" "ipreallocated" ; do
simple_test_event "$i"
done
|
kernevil/samba
|
ctdb/tests/UNIT/eventscripts/11.natgw.002.sh
|
Shell
|
gpl-3.0
| 356 |
#!/usr/bin/env bash
# Script to emulate OpenShift environment for testing
#
# Builds a throw-away OpenShift-like directory tree under $TMP (LOG/DATA/REPO),
# stubs the cartridge SDK, renders the cartridge's .erb templates, loads the
# generated env vars, then runs the supplied command ("$@") in that environment.
set -eu
# Reuse a caller-provided $TMP so state survives between runs; otherwise
# create a fresh temp dir.
if [ -z "${TMP:-}" ]; then
TMP=$( mktemp -d )
else
mkdir -p "${TMP}"
fi
export OPENSHIFT_LOG_DIR="${TMP}/LOG/"
test -e "${OPENSHIFT_LOG_DIR}" \
|| mkdir -p "${OPENSHIFT_LOG_DIR}"
export OPENSHIFT_DATA_DIR="${TMP}/DATA/"
test -e "${OPENSHIFT_DATA_DIR}" \
|| mkdir -p "${OPENSHIFT_DATA_DIR}"
export OPENSHIFT_REPO_DIR="${TMP}/REPO/"
test -e "${OPENSHIFT_REPO_DIR}" \
|| git clone . "${OPENSHIFT_REPO_DIR}"
# Minimal stand-in for the real OpenShift cartridge SDK (quoted heredoc:
# written literally, no expansion here).
export OPENSHIFT_CARTRIDGE_SDK_BASH="${TMP}/SDK"
cat > "${OPENSHIFT_CARTRIDGE_SDK_BASH}" <<\__EOF__
function set_env_var() {
mkdir -p "${3}"
echo "${2}" > ${3}/${1}
}
function client_result() {
echo "${@}"
}
__EOF__
### {{{ APP (PARTLY) SPECIFIC STUFF ###########################################
export OPENSHIFT_TOR_DIR="${TMP}/APP/"
test -e "${OPENSHIFT_TOR_DIR}" \
|| {
mkdir -p "${OPENSHIFT_TOR_DIR}"
cp -r bin usr "${OPENSHIFT_TOR_DIR}"
# Bug fix: the '*.erb' pattern must be quoted, otherwise the shell expands it
# against the current directory before find ever sees it (SC2061).
find . -name '*.erb' | xargs -n1 bash -c 'DEST=${OPENSHIFT_TOR_DIR}${1/.erb/}; mkdir -p "$( dirname "${DEST}" )"; erb ${1} > ${DEST}' --
}
# Load every generated env file into this shell's environment.
for name in ${OPENSHIFT_TOR_DIR}/env/*; do
export $(basename "${name}")=$( cat "${name}" )
done
if ! [ -z "${OPENSHIFT_TOR_PATH_ELEMENT}" ]; then
PATH="${OPENSHIFT_TOR_PATH_ELEMENT}:${PATH}"
fi
### }}} APP (PARTLY) SPECIFIC STUFF ###########################################
# Run the requested command inside the emulated environment.
"${@}"
echo "TMP=\"${TMP}\""
|
kauegimenes/shuffle-host
|
utils/run-like-in-openshift.sh
|
Shell
|
agpl-3.0
| 1,421 |
#!/bin/sh
# NOTE(review): declared as /bin/sh but uses bash-only features ('function',
# 'set -o pipefail', arrays); presumably Xcode invokes it via bash — confirm
# before running it standalone under dash.
set -e
set -u
set -o pipefail
# Report the script path and line number of any unexpected failure.
function on_error {
echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# copy_dir SOURCE DEST: mirror SOURCE's contents into DEST with rsync,
# deleting stale files, protecting concurrent temp files and skipping VCS dirs.
# The command is echoed first for build-log visibility.
copy_dir()
{
local source="$1"
local destination="$2"
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" \"${source}*\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" "${source}"/* "${destination}"
}
SELECT_SLICE_RETVAL=""

# select_slice PATH...: pick the .xcframework slice matching the current
# build settings. A candidate is accepted when it matches every arch in
# $ARCHS and the platform variant implied by $PLATFORM_NAME /
# $EFFECTIVE_PLATFORM_NAME (simulator / maccatalyst / device).
# The winner is echoed and stored in SELECT_SLICE_RETVAL.
select_slice() {
  local candidates=("$@")
  # Determine the required variant suffix, if any.
  local wanted_variant=""
  if [[ "$PLATFORM_NAME" == *"simulator" ]]; then
    wanted_variant="simulator"
  fi
  if [[ ! -z ${EFFECTIVE_PLATFORM_NAME+x} && "$EFFECTIVE_PLATFORM_NAME" == *"maccatalyst" ]]; then
    wanted_variant="maccatalyst"
  fi
  # Split $ARCHS on spaces so each architecture is checked individually.
  local wanted_archs
  wanted_archs=$(echo $ARCHS | tr " " "\n")
  local candidate
  for candidate in "${candidates[@]}"; do
    local all_archs_ok="1"
    local arch
    for arch in $wanted_archs; do
      # The slice must carry the required variant...
      if ! [[ "$candidate" == *"$wanted_variant"* ]]; then
        all_archs_ok="0"
        break
      fi
      # ...and a device build must not pick a simulator/maccatalyst slice.
      if [[ -z "$wanted_variant" && ("$candidate" == *"simulator"* || "$candidate" == *"maccatalyst"*) ]]; then
        all_archs_ok="0"
        break
      fi
      # Match the arch as a delimited token, e.g. '-arm64_', '_armv7/' or '_armv7' at end,
      # so 'arm64' does not falsely match 'arm64e'.
      local arch_pattern="[_\-]${arch}([\/_\-]|$)"
      if ! [[ "$candidate" =~ $arch_pattern ]]; then
        all_archs_ok="0"
        break
      fi
    done
    if [[ "$all_archs_ok" == "1" ]]; then
      echo "Selected xcframework slice ${candidate}"
      SELECT_SLICE_RETVAL="$candidate"
      break
    fi
  done
}
# install_xcframework BASEPATH NAME PACKAGE_TYPE PATHS...
# Select the .xcframework slice matching the current build (via select_slice)
# and copy it into PODS_XCFRAMEWORKS_BUILD_DIR/NAME; warn and skip if no
# slice matches the current $ARCHS.
install_xcframework() {
local basepath="$1"
local name="$2"
local package_type="$3"
local paths=("${@:4}")
# Locate the correct slice of the .xcframework for the current architectures
select_slice "${paths[@]}"
local target_path="$SELECT_SLICE_RETVAL"
if [[ -z "$target_path" ]]; then
echo "warning: [CP] Unable to find matching .xcframework slice in '${paths[@]}' for the current build architectures ($ARCHS)."
return
fi
local source="$basepath/$target_path"
local destination="${PODS_XCFRAMEWORKS_BUILD_DIR}/${name}"
if [ ! -d "$destination" ]; then
mkdir -p "$destination"
fi
copy_dir "$source/" "$destination"
echo "Copied $source to $destination"
}
install_xcframework "${PODS_ROOT}/FirebaseAnalytics/Frameworks/FirebaseAnalytics.xcframework" "FirebaseAnalytics/Base" "framework" "ios-arm64_i386_x86_64-simulator" "ios-arm64_armv7"
|
exoplatform/exo-ios
|
Pods/Target Support Files/FirebaseAnalytics/FirebaseAnalytics-xcframeworks.sh
|
Shell
|
lgpl-3.0
| 3,806 |
#!/bin/bash
# Speaker-recognition pipeline steps 2-3: adapt per-speaker GMMs from the
# world model, then score test segments against the speaker models.
# NOTE(review): logs are redirected to files named *.cfg (matching the echoed
# paths) — presumably intentional naming; verify against the other scripts.
# 2. Speaker GMM model adaptation
echo "Train Speaker dependent GMMs"
bin/TrainTarget --config cfg/TrainTarget.cfg &> log/TrainTarget.cfg
echo " done, see log/TrainTarget.cfg for details"
# 3. Speaker model comparison
echo "Compute Likelihood"
bin/ComputeTest --config cfg/ComputeTest_GMM.cfg &> log/ComputeTest.cfg
echo " done, see log/ComputeTest.cfg"
|
ftahmed/Mistral-Speaker-Recognition-Tutorial
|
scripts/04_TrainTargets.sh
|
Shell
|
apache-2.0
| 376 |
#!/bin/bash
# Local CI driver for the Ansible Service Broker: provisions MediaWiki +
# PostgreSQL via the service catalog, binds them, verifies the result, then
# tears everything down and checks cleanup. Failures are recorded in the
# per-stage *_ERROR flags and summarized at the end by error-variables.
BROKER_DIR="$(dirname "${BASH_SOURCE}")/../.."
source "${BROKER_DIR}/scripts/broker-ci/logs.sh"
source "${BROKER_DIR}/scripts/broker-ci/error.sh"
# Per-stage error flags; the *_ERROR="${*_ERROR:-false}" ones may be
# pre-set by the caller's environment.
BIND_ERROR=false
PROVISION_ERROR=false
POD_PRESET_ERROR=false
VERIFY_CI_ERROR=false
UNBIND_ERROR=false
DEPROVISION_ERROR=false
DEVAPI_ERROR=false
RESOURCE_ERROR="${RESOURCE_ERROR:-false}"
BUILD_ERROR="${BUILD_ERROR:-false}"
# Bug fix: the default was read from the misspelled env var MAKE_DELOY_ERROR,
# so a caller-set MAKE_DEPLOY_ERROR was silently ignored.
MAKE_DEPLOY_ERROR="${MAKE_DEPLOY_ERROR:-false}"
CLUSTER_SETUP_ERROR="${CLUSTER_SETUP_ERROR:-false}"
LOCAL_CI="${LOCAL_CI:-true}"
# ANSI colors used by the print-with-* helpers from logs.sh.
declare -r color_start="\033["
declare -r color_red="${color_start}0;31m"
declare -r color_yellow="${color_start}0;33m"
declare -r color_green="${color_start}0;32m"
declare -r color_norm="${color_start}0m"
# Trace all commands for CI debugging.
set -x
# Provision the MediaWiki and PostgreSQL service instances and wait for their
# pods; sets PROVISION_ERROR on failure, then runs error-check for the stage.
function provision {
oc create -f ./scripts/broker-ci/mediawiki123.yaml || PROVISION_ERROR=true
oc create -f ./scripts/broker-ci/postgresql.yaml || PROVISION_ERROR=true
./scripts/broker-ci/wait-for-resource.sh create pod mediawiki >> /tmp/wait-for-pods-log 2>&1
./scripts/broker-ci/wait-for-resource.sh create pod postgresql >> /tmp/wait-for-pods-log 2>&1
error-check "provision"
}
# Delete both service instances and wait for their pods to disappear.
function deprovision {
    # Bug fix: failures here used to set PROVISION_ERROR (copy/paste error);
    # they now set DEPROVISION_ERROR, which the final error reporting expects.
    oc delete -f ./scripts/broker-ci/mediawiki123.yaml || DEPROVISION_ERROR=true
    oc delete -f ./scripts/broker-ci/postgresql.yaml || DEPROVISION_ERROR=true
    ./scripts/broker-ci/wait-for-resource.sh delete pod mediawiki >> /tmp/wait-for-pods-log 2>&1
    ./scripts/broker-ci/wait-for-resource.sh delete pod postgresql >> /tmp/wait-for-pods-log 2>&1
}
# Create the MediaWiki<->PostgreSQL binding and wait for the binding resource;
# sets BIND_ERROR on failure, then runs error-check for the stage.
function bind {
print-with-green "Waiting for services to be ready"
sleep 10
oc create -f ./scripts/broker-ci/bind-mediawiki-postgresql.yaml || BIND_ERROR=true
./scripts/broker-ci/wait-for-resource.sh create bindings.v1alpha1.servicecatalog.k8s.io mediawiki-postgresql-binding >> /tmp/wait-for-pods-log 2>&1
error-check "bind"
}
# Delete the binding and wait for the injected PodPreset to disappear.
function unbind {
    print-with-green "Waiting for podpresets to be removed"
    # Bug fix: a failed delete used to set BIND_ERROR (copy/paste error);
    # it now sets UNBIND_ERROR, matching verify-cleanup and the final report.
    oc delete -f ./scripts/broker-ci/bind-mediawiki-postgresql.yaml || UNBIND_ERROR=true
    ./scripts/broker-ci/wait-for-resource.sh delete podpresets mediawiki-postgresql-binding >> /tmp/wait-for-pods-log 2>&1
}
# Recreate the MediaWiki pod up to RETRIES times and check that the bind
# credentials were injected (the pod carries a podpreset annotation).
# Sets BIND_ERROR if the preset never shows up.
function bind-credential-check {
    set +x
    RETRIES=10
    local found=false
    for x in $(seq $RETRIES); do
        # Force a pod restart so the PodPreset admission controller runs again.
        oc delete pods $(oc get pods -o name -l app=mediawiki123 -n default | head -1 | cut -f 2 -d '/') -n default --force --grace-period=10 || BIND_ERROR=true
        ./scripts/broker-ci/wait-for-resource.sh create pod mediawiki >> /tmp/wait-for-pods-log 2>&1
        # Filter for 'podpreset.admission.kubernetes.io' in the pod
        preset_test=$(oc get pods $(oc get pods -n default | grep mediawiki | awk $'{ print $1 }') -o yaml -n default | grep podpreset | awk $'{ print $1}' | cut -f 1 -d '/')
        if [ "${preset_test}" = "podpreset.admission.kubernetes.io" ]; then
            print-with-green "Pod presets found in the MediaWiki pod"
            found=true
            break
        else
            print-with-yellow "Pod presets not found in the MediaWiki pod"
            print-with-yellow "Retrying..."
        fi
    done
    # Bug fix: the old check compared the loop counter against RETRIES, which
    # falsely reported failure when the preset was found on the final attempt.
    if [ "${found}" = false ]; then
        print-with-red "Pod presets aren't in the MediaWiki pod"
        BIND_ERROR=true
    fi
    set -x
}
function pickup-pod-presets {
print-with-green "Checking if MediaWiki received bind credentials"
bind-credential-check
error-check "pickup-pod-presets"
}
# Fetch the MediaWiki main page through its OpenShift route and decide
# whether the CI run produced a working, bound application.  Sets
# VERIFY_CI_ERROR when the page reports an error or returns nothing.
function verify-ci-run {
    ROUTE=$(oc get route -n default | grep mediawiki | cut -f 4 -d ' ')/index.php/Main_Page
    # Fix: quote ${ROUTE} so an unexpected value cannot be word-split,
    # and use curl -s so the progress meter does not clutter CI logs.
    BIND_CHECK=$(curl -s "${ROUTE}" | grep "div class" | cut -f 2 -d "'")
    print-with-yellow "Running: curl ${ROUTE}| grep \"div class\" | cut -f 2 -d \"'\""
    if [ "${BIND_CHECK}" = "error" ]; then
        VERIFY_CI_ERROR=true
    elif [ "${BIND_CHECK}" = "" ]; then
        print-with-red "Failed to gather data from ${ROUTE}"
        VERIFY_CI_ERROR=true
    else
        print-with-green "SUCCESS"
        print-with-green "You can double check by opening http://${ROUTE} in your browser"
    fi
    error-check "verify-ci-run"
}
# After unbind/deprovision, make sure nothing survived: a leftover
# podpreset means unbind failed; a leftover mediawiki/postgresql dc
# means deprovision failed.  Only sets the corresponding error flags;
# reporting happens later.
function verify-cleanup {
if oc get -n default podpresets mediawiki-postgresql-binding ; then
UNBIND_ERROR=true
elif oc get -n default dc mediawiki || oc get -n default dc postgresql ; then
DEPROVISION_ERROR=true
fi
}
# Exercise the broker's dev API: push an APB spec, wait for its
# serviceclass to be created, then delete the spec and expect HTTP 204.
# Sets DEVAPI_ERROR on a failed delete.
function dev-api-test {
print-with-green "Waiting for foo apb servicename"
BROKERURL=$(oc get -n ansible-service-broker route -o custom-columns=host:spec.host --no-headers)
# NOTE(review): base64 output may be line-wrapped here, which would
# mangle the apbSpec form value -- confirm whether 'base64 -w 0' is needed.
APBID=$(curl -s -k -XPOST -u admin:admin https://$BROKERURL/apb/spec -d "apbSpec=$(base64 scripts/broker-ci/apb.yml)"| \
python -c "import sys; import json; print json.load(sys.stdin)['services'][0]['id']")
sleep 10
# Delete the controller-manager pod, presumably to force it to re-sync
# and notice the new serviceclass -- TODO confirm.
oc delete pod -n service-catalog -l app=controller-manager
./scripts/broker-ci/wait-for-resource.sh create serviceclass apb-push-ansibleplaybookbundle-foo-apb >> /tmp/wait-for-pods-log 2>&1
if ! curl -I -s -k -XDELETE -u admin:admin https://$BROKERURL/apb/spec/$APBID | grep -q "204 No Content" ; then
DEVAPI_ERROR=true
fi
}
######
# Main
######
# End-to-end CI flow.  Ordering matters: bind needs a provisioned app,
# pickup/verify need the binding, and verify-cleanup needs unbind and
# deprovision to have run first.
provision
bind
pickup-pod-presets
verify-ci-run
unbind
deprovision
verify-cleanup
dev-api-test
# NOTE(review): convert-to-red / error-variables are presumably defined
# earlier in this file and turn the accumulated *_ERROR flags into the
# final report / exit status -- not visible in this chunk.
convert-to-red
error-variables
|
jmontleon/ansible-service-broker
|
scripts/broker-ci/local-ci.sh
|
Shell
|
apache-2.0
| 5,040 |
#!/bin/bash
# AMH (LNMP/Nginx) panel installer, "vpskk mod" build.  Flat script:
# global configuration is declared below, followed by the Check/Input/
# Install/Uninstall functions invoked at the end of the file.
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
clear;
# NOTE(review): the banner says "AMH 4.2" while AMHVersion below is
# 'amh-4.5'; the select menus later also advertise older MySQL/MariaDB
# point releases than the *Version variables actually download.
echo '================================================================';
echo ' [LNMP/Nginx] Amysql Host - AMH 4.2 (vpskk mod v45)';
echo ' http://Amysql.com';
echo ' http://vpskk.com';
echo '================================================================';
# VAR ***************************************************************************************
AMHDir='/home/amh_install/';   # scratch dir for downloads and builds
SysName='';                    # centos|debian|ubuntu, set by CheckSystem
SysBit='';                     # '32' or '64', set by CheckSystem
Cpunum='';                     # CPU count, used for make -j
RamTotal='';
RamSwap='';
InstallModel='';               # '1' = full install, set by ConfirmInstall
confirm='';                    # DB choice 1..5, set by ConfirmInstall
# First non-private IPv4 address reported by ifconfig (relies on the
# legacy 'inet addr:' output format).
Domain=`ifconfig | grep 'inet addr:'| egrep -v ":192.168|:172.1[6-9].|:172.2[0-9].|:172.3[0-2].|:10\.|:127." | cut -d: -f2 | awk '{ print $1}'`;
MysqlPass='';
AMHPass='';
StartDate='';
StartDateSecond='';
PHPDisable='';                 # extra ./configure flag on low-memory boxes
# Version
AMSVersion='ams-1.5.0107-02';
AMHVersion='amh-4.5';
LibiconvVersion='libiconv-1.14';
Mysql55Version='mysql-5.5.53';
Mysql56Version='mysql-5.6.34';
Mysql57Version='mysql-5.7.16';
Mariadb55Version='mariadb-galera-5.5.53';
Mariadb10Version='mariadb-10.1.18';
Php53Version='php-5.3.29';
Php54Version='php-5.4.45';
Php55Version='php-5.5.38';
Php56Version='php-5.6.27';
Php70Version='php-7.0.12';
NginxVersion='nginx-1.9.6';
OpenSSLVersion='openssl-1.0.2d';
NginxCachePurgeVersion='ngx_cache_purge-2.3';
PureFTPdVersion='pure-ftpd-1.0.36';
# Function List *****************************************************************************
# Validate the environment before installing: must run as root, on a
# supported distro, with enough RAM+swap for the architecture.
# Side effects: sets SysName, SysBit, Cpunum, RamTotal, RamSwap, RamSum
# and (on <600MB boxes) PHPDisable; exits on any failed check.
function CheckSystem()
{
[ $(id -u) != '0' ] && echo '[Error] Please use root to install AMH.' && exit;
# egrep without -q also prints the matched /etc/issue line -- presumably
# intentional as a crude "detected distro" trace.
egrep -i "centos" /etc/issue && SysName='centos';
egrep -i "debian" /etc/issue && SysName='debian';
egrep -i "ubuntu" /etc/issue && SysName='ubuntu';
[ "$SysName" == '' ] && echo '[Error] Your system is not supported install AMH' && exit;
# 64-bit only when getconf reports WORD_BIT=32 and LONG_BIT=64.
SysBit='32' && [ `getconf WORD_BIT` == '32' ] && [ `getconf LONG_BIT` == '64' ] && SysBit='64';
Cpunum=`cat /proc/cpuinfo | grep 'processor' | wc -l`;
RamTotal=`free -m | grep 'Mem' | awk '{print $2}'`;
RamSwap=`free -m | grep 'Swap' | awk '{print $2}'`;
echo "Server ${Domain}";
echo "${SysBit}Bit, ${Cpunum}*CPU, ${RamTotal}MB*RAM, ${RamSwap}MB*Swap";
echo '================================================================';
RamSum=$[$RamTotal+$RamSwap];
# Memory floors: 250MB RAM+swap for 32-bit, 480MB for 64-bit.
[ "$SysBit" == '32' ] && [ "$RamSum" -lt '250' ] && \
echo -e "[Error] Not enough memory install AMH. \n(32bit system need memory: ${RamTotal}MB*RAM + ${RamSwap}MB*Swap > 250MB)" && exit;
if [ "$SysBit" == '64' ] && [ "$RamSum" -lt '480' ]; then
echo -e "[Error] Not enough memory install AMH. \n(64bit system need memory: ${RamTotal}MB*RAM + ${RamSwap}MB*Swap > 480MB)";
[ "$RamSum" -gt '250' ] && echo "[Notice] Please use 32bit system.";
exit;
fi;
# Under 600MB total, build PHP without the fileinfo extension (it is
# memory-hungry to compile).
[ "$RamSum" -lt '600' ] && PHPDisable='--disable-fileinfo';
}
# Interactive menu: choose install/uninstall, pick the database flavor
# (sets 'confirm' to 1..5) and opt in to extra PHP versions (sets
# confirm53/54/55/70 read by InstallPhp5x/InstallPhp70 later).
# NOTE(review): the menu labels advertise versions (e.g. Mysql-5.5.46)
# that differ from the *Version variables actually downloaded
# (mysql-5.5.53 etc.) -- labels only, but confirm intent.
function ConfirmInstall()
{
echo "[Notice] Confirm Install/Uninstall AMH? please select: (1~3)"
select selected in 'Install AMH 4.2' 'Uninstall AMH 4.2' 'Exit'; do break; done;
[ "$selected" == 'Exit' ] && echo 'Exit Install.' && exit;
if [ "$selected" == 'Install AMH 4.2' ]; then
InstallModel='1';
elif [ "$selected" == 'Uninstall AMH 4.2' ]; then
Uninstall;
else
# Invalid selection (e.g. EOF / out-of-range input): re-prompt.
ConfirmInstall;
return;
fi;
echo "[Notice] Confirm Install Mysql / Mariadb? please select: (1~6)"
select DBselect in 'Mysql-5.5.46' 'Mysql-5.6.2' 'Mysql-5.7.9' 'Mariadb-5.5.47' 'Mariadb-10.1.11' 'Exit'; do break; done;
[ "$DBselect" == 'Exit' ] && echo 'Exit Install.' && exit;
if [ "$DBselect" == 'Mysql-5.5.46' ]; then
confirm='1' && echo '[OK] Mysql-5.5.46 installed';
elif [ "$DBselect" == 'Mysql-5.6.2' ]; then
confirm='2' && echo '[OK] Mysql-5.6.2 installed';
elif [ "$DBselect" == 'Mysql-5.7.9' ]; then
confirm='3' && echo '[OK] Mysql-5.7.9 installed';
elif [ "$DBselect" == 'Mariadb-5.5.47' ]; then
confirm='4' && echo '[OK] Mariadb-5.5.47 installed';
elif [ "$DBselect" == 'Mariadb-10.1.11' ]; then
confirm='5' && echo '[OK] Mariadb-10.1.11 installed';
else
ConfirmInstall;
return;
fi;
echo "[OK] You Selected: ${DBselect}";
read -p '[Notice] Do you want PHP5.3? : (y/n)' confirm53;
[ "$confirm53" == 'y' ] && echo '[OK] php5.3 will be installed';
read -p '[Notice] Do you want PHP5.4? : (y/n)' confirm54;
[ "$confirm54" == 'y' ] && echo '[OK] php5.4 will be installed';
read -p '[Notice] Do you want PHP5.5? : (y/n)' confirm55;
[ "$confirm55" == 'y' ] && echo '[OK] php5.5 will be installed';
read -p '[Notice] Do you want PHP7.0? : (y/n)' confirm70;
[ "$confirm70" == 'y' ] && echo '[OK] php7.0 will be installed';
}
# Ensure the global $Domain holds the server IP: if auto-detection left
# it empty, prompt for it, recursing until a non-empty value is given,
# then echo the value back for confirmation.
function InputDomain()
{
    if [ "$Domain" == '' ]; then
        echo '[Error] empty server ip.';
        read -p '[Notice] Please input server ip:' Domain;
        if [ "$Domain" == '' ]; then
            InputDomain;
        fi;
    fi;
    if [ "$Domain" != '' ]; then
        echo '[OK] Your server ip is:';
        echo $Domain;
    fi;
}
# Prompt for the MySQL root password into the global $MysqlPass,
# re-prompting (via recursion) until a non-empty value is entered,
# then echo it back for confirmation.
function InputMysqlPass()
{
    read -p '[Notice] Please input MySQL password:' MysqlPass;
    if [ "$MysqlPass" != '' ]; then
        echo '[OK] Your MySQL password is:';
        echo $MysqlPass;
    else
        echo '[Error] MySQL password is empty.';
        InputMysqlPass;
    fi;
}
# Prompt for the AMH panel password into the global $AMHPass,
# re-prompting (via recursion) until a non-empty value is entered,
# then echo it back for confirmation.
function InputAMHPass()
{
    read -p '[Notice] Please input AMH password:' AMHPass;
    if [ "$AMHPass" != '' ]; then
        echo '[OK] Your AMH password is:';
        echo $AMHPass;
    else
        echo '[Error] AMH password empty.';
        InputAMHPass;
    fi;
}
# Set the system timezone to Asia/Shanghai, install an NTP client for
# the current distro, sync the clock, and record the install start time
# in StartDate/StartDateSecond.
function Timezone()
{
    rm -rf /etc/localtime;
    ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime;
    echo '[ntp Installing] ******************************** >>';
    # Bug fix: the original '[ centos ] && yum ... || apt-get ...' also
    # ran apt-get on CentOS whenever yum exited non-zero; use an
    # explicit if/else instead.
    if [ "$SysName" == 'centos' ]; then
        yum install -y ntp;
    else
        apt-get install -y ntpdate;
    fi;
    ntpdate -u pool.ntp.org;
    StartDate=$(date);
    StartDateSecond=$(date +%s);
    echo "Start time: ${StartDate}";
}
# Permanently disable SELinux by editing its persistent config (takes
# effect on next boot); no-op when the file is absent or empty.
function CloseSelinux()
{
[ -s /etc/selinux/config ] && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config;
}
# Remove distro-packaged web-stack components (httpd/nginx, php, mysql)
# so they cannot conflict with the from-source builds installed below.
function DeletePackages()
{
    case "$SysName" in
    centos)
        yum -y remove httpd;
        yum -y remove php;
        yum -y remove mysql-server mysql;
        yum -y remove php-mysql;
        ;;
    *)
        apt-get --purge remove nginx
        apt-get --purge remove mysql-server;
        apt-get --purge remove mysql-common;
        apt-get --purge remove php;
        ;;
    esac;
}
# Install the compiler toolchain and library -dev packages needed to
# build nginx/PHP/MySQL from source, via yum (CentOS) or apt (Debian/
# Ubuntu).  Packages are installed one by one so a single failure does
# not abort the rest.
function InstallBasePackages()
{
if [ "$SysName" == 'centos' ]; then
echo '[yum-fastestmirror Installing] ************************************************** >>';
yum -y install yum-fastestmirror;
# Temporarily clear any 'exclude=' filter so nothing below is blocked;
# the original yum.conf is restored afterwards.
cp /etc/yum.conf /etc/yum.conf.lnmp
sed -i 's:exclude=.*:exclude=:g' /etc/yum.conf
for packages in gcc gcc-c++ ncurses-devel libxml2-devel openssl-devel curl-devel libjpeg-devel libpng-devel autoconf pcre-devel libtool-libs freetype-devel gd zlib-devel zip unzip wget crontabs iptables file bison cmake patch mlocate flex diffutils automake make readline-devel glibc-devel glibc-static glib2-devel bzip2-devel gettext-devel libcap-devel logrotate ftp openssl expect; do
echo "[${packages} Installing] ************************************************** >>";
yum -y install $packages;
done;
mv -f /etc/yum.conf.lnmp /etc/yum.conf;
else
# Debian/Ubuntu: make sure no distro Apache/MySQL/PHP is left running.
apt-get remove -y apache2 apache2-doc apache2-utils apache2.2-common apache2.2-bin apache2-mpm-prefork apache2-doc apache2-mpm-worker mysql-client mysql-server mysql-common php;
killall apache2;
apt-get update;
for packages in build-essential gcc g++ cmake make ntp logrotate automake patch autoconf autoconf2.13 re2c wget flex cron libzip-dev libc6-dev rcconf bison cpp binutils unzip tar bzip2 libncurses5-dev libncurses5 libtool libevent-dev libpcre3 libpcre3-dev libpcrecpp0 libssl-dev zlibc openssl libsasl2-dev libxml2 libxml2-dev libltdl3-dev libltdl-dev zlib1g zlib1g-dev libbz2-1.0 libbz2-dev libglib2.0-0 libglib2.0-dev libpng3 libfreetype6 libfreetype6-dev libjpeg62 libjpeg62-dev libjpeg-dev libpng-dev libpng12-0 libpng12-dev curl libcurl3 libpq-dev libpq5 gettext libcurl4-gnutls-dev libcurl4-openssl-dev libcap-dev ftp openssl expect; do
echo "[${packages} Installing] ************************************************** >>";
apt-get install -y $packages --force-yes;apt-get -fy install;apt-get -y autoremove;
done;
fi;
}
# Download helper: $1 = local file name, $2 = source URL.
# Skips the download when a non-empty copy already exists in
# $AMHDir/packages; appends a timestamp query string as a cache-buster
# and renames the result to $1.  Exits the installer on download failure.
function Downloadfile()
{
    randstr=$(date +%s);
    cd "$AMHDir/packages";
    if [ -s "$1" ]; then
        echo "[OK] $1 found.";
    else
        echo "[Notice] $1 not found, download now......";
        # Bug fix: the URL and file expansions were unquoted, so '?'
        # (and any glob/space characters) could be mangled by the shell.
        if ! wget -c --tries=3 "${2}?${randstr}" ; then
            echo "[Error] Download Failed : $1, please check $2 ";
            exit;
        else
            mv "${1}?${randstr}" "$1";
        fi;
    fi;
}
# Prepare working directories ($AMHDir, /root/amh) and fetch + unpack
# the AMH configuration bundle used by the Install* functions.
function InstallReady()
{
    mkdir -p $AMHDir/conf;
    mkdir -p $AMHDir/packages/untar;
    # Bug fix: 'chmod +Rw' is an invalid symbolic mode ('R' is not a
    # permission letter); the intent was a recursive write bit.
    chmod -R +w $AMHDir/packages;
    mkdir -p /root/amh/;
    chmod -R +w /root/amh;
    cd $AMHDir/packages;
    wget http://soft.aeink.com/amh/conf.zip;
    unzip conf.zip -d $AMHDir/conf;
}
# Install Function *********************************************************
# Fully remove AMH and every component it installed.  Destructive:
# deletes MySQL data, web roots and configs after offering a backup.
# Statement order matters (services are stopped before their files are
# removed); do not reorder.
function Uninstall()
{
amh host list 2>/dev/null;
echo -e "\033[41m\033[37m[Warning] Please backup your data first. Uninstall will delete all the data!!! \033[0m ";
read -p '[Notice] Backup the data now? : (y/n)' confirmBD;
# Anything other than an explicit y/n aborts the uninstall.
[ "$confirmBD" != 'y' -a "$confirmBD" != 'n' ] && exit;
[ "$confirmBD" == 'y' ] && amh backup;
echo '=============================================================';
read -p '[Notice] Confirm Uninstall(Delete All Data)? : (y/n)' confirmUN;
[ "$confirmUN" != 'y' ] && exit;
# Stop services via amh first, then kill any stragglers directly.
amh mysql stop 2>/dev/null;
amh php stop 2>/dev/null;
amh nginx stop 2>/dev/null;
killall nginx;
killall mysqld;
killall pure-ftpd;
killall php-cgi;
killall php-fpm;
# Remove the boot-time service hook for the current distro.
[ "$SysName" == 'centos' ] && chkconfig amh-start off || update-rc.d -f amh-start remove;
rm -rf /etc/init.d/amh-start;
rm -rf /usr/local/libiconv;
rm -rf /usr/local/$OpenSSLVersion;
rm -rf /usr/local/nginx/ ;
rm -rf /usr/local/boost_1_59_0/;
# Uninstall any optional AMH modules before removing the module tree.
for line in `ls /root/amh/modules`; do
amh module $line uninstall;
done;
rm -rf /usr/local/mysql/ /etc/my.cnf /etc/ld.so.conf.d/mysql.conf /usr/bin/mysql /var/lock/subsys/mysql /var/spool/mail/mysql;
rm -rf /home/mysqldata;
rm -rf /usr/local/php/ /usr/local/php5.3/ /usr/local/php5.4/ /usr/local/php5.5/ /usr/local/php7.0/ /usr/lib/php /etc/php.ini /etc/php.d /usr/local/zend;
rm -rf /home/wwwroot/;
rm -rf /home/proxyroot/;
rm -rf /etc/pure-ftpd.conf /etc/pam.d/ftp /usr/local/sbin/pure-ftpd /etc/pureftpd.passwd /etc/amh-iptables;
rm -rf /etc/logrotate.d/nginx /root/.mysqlroot;
rm -rf /root/amh /bin/amh;
rm -rf $AMHDir;
rm -f /usr/bin/{mysqld_safe,myisamchk,mysqldump,mysqladmin,mysql,nginx,php-fpm,phpize,php};
echo '[OK] Successfully uninstall AMH.';
exit;
}
# Build GNU libiconv into /usr/local/libiconv (later referenced by the
# PHP builds via --with-iconv).  Skips the build if already installed.
function InstallLibiconv()
{
echo "[${LibiconvVersion} Installing] ************************************************** >>";
Downloadfile "${LibiconvVersion}.tar.gz" "http://soft.aeink.com/amh/files/${LibiconvVersion}.tar.gz";
rm -rf $AMHDir/packages/untar/$LibiconvVersion;
echo "tar -zxf ${LibiconvVersion}.tar.gz ing...";
tar -zxf $AMHDir/packages/$LibiconvVersion.tar.gz -C $AMHDir/packages/untar;
if [ ! -d /usr/local/libiconv ]; then
cd $AMHDir/packages/untar/$LibiconvVersion;
./configure --prefix=/usr/local/libiconv;
make;
make install;
echo "[OK] ${LibiconvVersion} install completed.";
else
echo '[OK] libiconv is installed!';
fi;
}
# Download and extract the OpenSSL sources into /usr/local.  This only
# unpacks the tree; './config' is run later against it during the nginx
# build (nginx is configured with --with-openssl pointing here).
function InstallOpenSSL()
{
echo "[${OpenSSLVersion} Installing] ************************************************** >>";
Downloadfile "${OpenSSLVersion}.tar.gz" "http://soft.aeink.com/amh/files/${OpenSSLVersion}.tar.gz";
rm -rf $AMHDir/packages/untar/$OpenSSLVersion;
echo "tar -zxf ${OpenSSLVersion}.tar.gz ing...";
tar -zxf $AMHDir/packages/$OpenSSLVersion.tar.gz -C /usr/local;
echo "[OK] ${OpenSSLVersion} tar completed.";
}
# Build and install MySQL 5.5 from source into /usr/local/mysql when DB
# choice '1' was selected; otherwise fall through to the 5.6 installer.
# Reads: confirm, AMHDir, Cpunum, SysBit, MysqlPass.  Creates the mysql
# user/group, data dir /home/mysqldata, starts the server and sets the
# root password.
function InstallMysql55()
{
if [ "$confirm" == '1' ]; then
# [dir] /usr/local/mysql/
echo "[${Mysql55Version} Installing] ************************************************** >>";
Downloadfile "${Mysql55Version}.tar.gz" "http://mirrors.sohu.com/mysql/MySQL-5.5/${Mysql55Version}.tar.gz";
rm -rf $AMHDir/packages/untar/$Mysql55Version;
echo "tar -zxf ${Mysql55Version}.tar.gz ing...";
tar -zxf $AMHDir/packages/$Mysql55Version.tar.gz -C $AMHDir/packages/untar;
# Skip the whole build if a mysql binary is already installed.
if [ ! -f /usr/local/mysql/bin/mysql ]; then
cd $AMHDir/packages/untar/$Mysql55Version;
groupadd mysql;
useradd -s /sbin/nologin -g mysql mysql;
cmake -DCMAKE_INSTALL_PREFIX=/usr/local/mysql -DDEFAULT_CHARSET=utf8 -DDEFAULT_COLLATION=utf8_general_ci -DWITH_EXTRA_CHARSETS=complex -DWITH_READLINE=1 -DENABLED_LOCAL_INFILE=1;
#http://forge.mysql.com/wiki/Autotools_to_CMake_Transition_Guide
make -j $Cpunum;
make install;
chmod +w /usr/local/mysql;
chown -R mysql:mysql /usr/local/mysql;
mkdir -p /home/mysqldata;
chown -R mysql:mysql /home/mysqldata;
rm -f /etc/mysql/my.cnf /usr/local/mysql/etc/my.cnf;
cp $AMHDir/conf/my.cnf /etc/my.cnf;
cp $AMHDir/conf/mysql /root/amh/mysql;
chmod +x /root/amh/mysql;
/usr/local/mysql/scripts/mysql_install_db --user=mysql --defaults-file=/etc/my.cnf --basedir=/usr/local/mysql --datadir=/home/mysqldata;
# Register the MySQL libs with the dynamic linker.
# EOF **********************************
cat > /etc/ld.so.conf.d/mysql.conf<<EOF
/usr/local/mysql/lib/mysql
/usr/local/lib
EOF
# **************************************
ldconfig;
if [ "$SysBit" == '64' ] ; then
ln -s /usr/local/mysql/lib/mysql /usr/lib64/mysql;
else
ln -s /usr/local/mysql/lib/mysql /usr/lib/mysql;
fi;
chmod 775 /usr/local/mysql/support-files/mysql.server;
/usr/local/mysql/support-files/mysql.server start;
ln -s /usr/local/mysql/bin/mysql /usr/bin/mysql;
ln -s /usr/local/mysql/bin/mysqladmin /usr/bin/mysqladmin;
ln -s /usr/local/mysql/bin/mysqldump /usr/bin/mysqldump;
ln -s /usr/local/mysql/bin/myisamchk /usr/bin/myisamchk;
ln -s /usr/local/mysql/bin/mysqld_safe /usr/bin/mysqld_safe;
/usr/local/mysql/bin/mysqladmin password $MysqlPass;
rm -rf /usr/local/mysql/data/test;
# Harden: drop anonymous/remote accounts and (re)set the root password.
# EOF **********************************
mysql -hlocalhost -uroot -p$MysqlPass <<EOF
USE mysql;
DELETE FROM user WHERE User!='root' OR (User = 'root' AND Host != 'localhost');
UPDATE user set password=password('$MysqlPass') WHERE User='root';
DROP USER ''@'%';
FLUSH PRIVILEGES;
EOF
# **************************************
echo "[OK] ${Mysql55Version} install completed.";
else
echo '[OK] MySQL is installed.';
fi;
else
# Not this flavor: chain to the next installer in the sequence.
InstallMysql56;
fi;
}
# Build and install MySQL 5.6 from source into /usr/local/mysql when DB
# choice '2' was selected; otherwise fall through to the 5.7 installer.
# Identical flow to InstallMysql55 apart from the tarball and my56.cnf.
function InstallMysql56()
{
if [ "$confirm" == '2' ]; then
# [dir] /usr/local/mysql/
echo "[${Mysql56Version} Installing] ************************************************** >>";
Downloadfile "${Mysql56Version}.tar.gz" "http://mirrors.sohu.com/mysql/MySQL-5.6/${Mysql56Version}.tar.gz";
rm -rf $AMHDir/packages/untar/$Mysql56Version;
echo "tar -zxf ${Mysql56Version}.tar.gz ing...";
tar -zxf $AMHDir/packages/$Mysql56Version.tar.gz -C $AMHDir/packages/untar;
# Skip the whole build if a mysql binary is already installed.
if [ ! -f /usr/local/mysql/bin/mysql ]; then
cd $AMHDir/packages/untar/$Mysql56Version;
groupadd mysql;
useradd -s /sbin/nologin -g mysql mysql;
cmake -DCMAKE_INSTALL_PREFIX=/usr/local/mysql -DDEFAULT_CHARSET=utf8 -DDEFAULT_COLLATION=utf8_general_ci -DWITH_EXTRA_CHARSETS=complex -DWITH_READLINE=1 -DENABLED_LOCAL_INFILE=1;
#http://forge.mysql.com/wiki/Autotools_to_CMake_Transition_Guide
make -j $Cpunum;
make install;
chmod +w /usr/local/mysql;
chown -R mysql:mysql /usr/local/mysql;
mkdir -p /home/mysqldata;
chown -R mysql:mysql /home/mysqldata;
rm -f /etc/mysql/my.cnf /usr/local/mysql/etc/my.cnf;
cp $AMHDir/conf/my56.cnf /etc/my.cnf;
cp $AMHDir/conf/mysql /root/amh/mysql;
chmod +x /root/amh/mysql;
/usr/local/mysql/scripts/mysql_install_db --user=mysql --defaults-file=/etc/my.cnf --basedir=/usr/local/mysql --datadir=/home/mysqldata;
# Register the MySQL libs with the dynamic linker.
# EOF **********************************
cat > /etc/ld.so.conf.d/mysql.conf<<EOF
/usr/local/mysql/lib/mysql
/usr/local/lib
EOF
# **************************************
ldconfig;
if [ "$SysBit" == '64' ] ; then
ln -s /usr/local/mysql/lib/mysql /usr/lib64/mysql;
else
ln -s /usr/local/mysql/lib/mysql /usr/lib/mysql;
fi;
chmod 775 /usr/local/mysql/support-files/mysql.server;
/usr/local/mysql/support-files/mysql.server start;
ln -s /usr/local/mysql/bin/mysql /usr/bin/mysql;
ln -s /usr/local/mysql/bin/mysqladmin /usr/bin/mysqladmin;
ln -s /usr/local/mysql/bin/mysqldump /usr/bin/mysqldump;
ln -s /usr/local/mysql/bin/myisamchk /usr/bin/myisamchk;
ln -s /usr/local/mysql/bin/mysqld_safe /usr/bin/mysqld_safe;
/usr/local/mysql/bin/mysqladmin password $MysqlPass;
rm -rf /usr/local/mysql/data/test;
# Harden: drop anonymous/remote accounts and (re)set the root password.
# EOF **********************************
mysql -hlocalhost -uroot -p$MysqlPass <<EOF
USE mysql;
DELETE FROM user WHERE User!='root' OR (User = 'root' AND Host != 'localhost');
UPDATE user set password=password('$MysqlPass') WHERE User='root';
DROP USER ''@'%';
FLUSH PRIVILEGES;
EOF
# **************************************
echo "[OK] ${Mysql56Version} install completed.";
else
echo '[OK] MySQL is installed.';
fi;
else
# Not this flavor: chain to the next installer in the sequence.
InstallMysql57;
fi;
}
# Build and install MySQL 5.7 from source into /usr/local/mysql when DB
# choice '3' was selected; otherwise fall through to the MariaDB 5.5
# installer.  Additionally builds and installs boost 1.59 first.
# NOTE(review): the cmake invocation below does not pass -DWITH_BOOST
# pointing at the boost tree built here, and it initializes the data
# dir with bin/mysql_install_db (deprecated in 5.7 in favour of
# 'mysqld --initialize') -- confirm both actually work for 5.7.16.
function InstallMysql57()
{
if [ "$confirm" == '3' ]; then
cd $AMHDir/packages/
wget http://www.automoviel.com/AMH/boost_1_59_0.tar.gz;
# [dir] /usr/local/mysql/
echo "[${Mysql57Version} Installing] ************************************************** >>";
Downloadfile "${Mysql57Version}.tar.gz" "http://mirrors.sohu.com/mysql/MySQL-5.7/${Mysql57Version}.tar.gz";
rm -rf $AMHDir/packages/untar/$Mysql57Version;
echo "tar -zxf ${Mysql57Version}.tar.gz ing...";
tar -zxf $AMHDir/packages/$Mysql57Version.tar.gz -C $AMHDir/packages/untar;
# Build boost (required by MySQL 5.7) into /usr/local.
mkdir -p /usr/local/boost_1_59_0;
tar -xzvf $AMHDir/packages/boost_1_59_0.tar.gz -C /usr/local/;
cd /usr/local/boost_1_59_0;
./bootstrap.sh;
./b2;
./b2 install;
cd $AMHDir/packages/
#rm -rf boost_1_59_0.tar.gz;
# Skip the whole build if a mysql binary is already installed.
if [ ! -f /usr/local/mysql/bin/mysql ]; then
cd $AMHDir/packages/untar/$Mysql57Version;
groupadd mysql;
useradd -s /sbin/nologin -g mysql mysql;
cmake -DCMAKE_INSTALL_PREFIX=/usr/local/mysql -DDEFAULT_CHARSET=utf8 -DDEFAULT_COLLATION=utf8_general_ci -DWITH_EXTRA_CHARSETS=complex -DWITH_READLINE=1 -DENABLED_LOCAL_INFILE=1;
#http://forge.mysql.com/wiki/Autoto ls_to_CMake_Transition_Guide
make -j $Cpunum;
make install;
chmod +w /usr/local/mysql;
chown -R mysql:mysql /usr/local/mysql;
mkdir -p /home/mysqldata;
chown -R mysql:mysql /home/mysqldata;
rm -f /etc/mysql/my.cnf /usr/local/mysql/etc/my.cnf;
cp $AMHDir/conf/my57.cnf /etc/my.cnf;
cp $AMHDir/conf/mysql /root/amh/mysql;
chmod +x /root/amh/mysql;
/usr/local/mysql/bin/mysql_install_db --user=mysql --defaults-file=/etc/my.cnf --basedir=/usr/local/mysql --datadir=/home/mysqldata;
# Register the MySQL libs with the dynamic linker.
# EOF **********************************
cat > /etc/ld.so.conf.d/mysql.conf<<EOF
/usr/local/mysql/lib/mysql
/usr/local/lib
EOF
# **************************************
ldconfig;
if [ "$SysBit" == '64' ] ; then
ln -s /usr/local/mysql/lib/mysql /usr/lib64/mysql;
else
ln -s /usr/local/mysql/lib/mysql /usr/lib/mysql;
fi;
chmod 775 /usr/local/mysql/support-files/mysql.server;
/usr/local/mysql/support-files/mysql.server start;
ln -s /usr/local/mysql/bin/mysql /usr/bin/mysql;
ln -s /usr/local/mysql/bin/mysqladmin /usr/bin/mysqladmin;
ln -s /usr/local/mysql/bin/mysqldump /usr/bin/mysqldump;
ln -s /usr/local/mysql/bin/myisamchk /usr/bin/myisamchk;
ln -s /usr/local/mysql/bin/mysqld_safe /usr/bin/mysqld_safe;
/usr/local/mysql/bin/mysqladmin password $MysqlPass;
rm -rf /usr/local/mysql/data/test;
# 5.7 stores the password hash in authentication_string, hence the
# different SQL from the 5.5/5.6 installers.
# EOF **********************************
mysql -hlocalhost -uroot -p$MysqlPass <<EOF
USE mysql;
update user set authentication_string = PASSWORD('$MysqlPass') where user = 'root';
FLUSH PRIVILEGES;
EOF
# **************************************
echo "[OK] ${Mysql57Version} install completed.";
else
echo '[OK] MySQL is installed.';
fi;
else
# Not this flavor: chain to the next installer in the sequence.
InstallMariadb55;
fi;
}
# Build and install MariaDB Galera 5.5 from source into /usr/local/mysql
# when DB choice '4' was selected; otherwise fall through to the
# MariaDB 10.1 installer.  Same flow as the MySQL 5.5 installer.
function InstallMariadb55()
{
if [ "$confirm" == '4' ]; then
# [dir] /usr/local/mysql/
echo "[${Mariadb55Version} Installing] ************************************************** >>";
Downloadfile "${Mariadb55Version}.tar.gz" "http://mirrors.ustc.edu.cn/mariadb/${Mariadb55Version}/source/${Mariadb55Version}.tar.gz";
rm -rf $AMHDir/packages/untar/$Mariadb55Version;
echo "tar -zxf ${Mariadb55Version}.tar.gz ing...";
tar -zxf $AMHDir/packages/$Mariadb55Version.tar.gz -C $AMHDir/packages/untar;
# Skip the whole build if a mysql binary is already installed.
if [ ! -f /usr/local/mysql/bin/mysql ]; then
cd $AMHDir/packages/untar/$Mariadb55Version;
groupadd mysql;
useradd -s /sbin/nologin -g mysql mysql;
cmake -DCMAKE_INSTALL_PREFIX=/usr/local/mysql -DDEFAULT_CHARSET=utf8 -DDEFAULT_COLLATION=utf8_general_ci -DWITH_EXTRA_CHARSETS=complex -DWITH_READLINE=1 -DENABLED_LOCAL_INFILE=1;
#http://forge.mysql.com/wiki/Autotools_to_CMake_Transition_Guide
make -j $Cpunum;
make install;
chmod +w /usr/local/mysql;
chown -R mysql:mysql /usr/local/mysql;
mkdir -p /home/mysqldata;
chown -R mysql:mysql /home/mysqldata;
rm -f /etc/mysql/my.cnf /usr/local/mysql/etc/my.cnf;
cp $AMHDir/conf/my.cnf /etc/my.cnf;
cp $AMHDir/conf/mysql /root/amh/mysql;
chmod +x /root/amh/mysql;
/usr/local/mysql/scripts/mysql_install_db --user=mysql --defaults-file=/etc/my.cnf --basedir=/usr/local/mysql --datadir=/home/mysqldata;
# Register the MariaDB libs with the dynamic linker.
# EOF **********************************
cat > /etc/ld.so.conf.d/mysql.conf<<EOF
/usr/local/mysql/lib/mysql
/usr/local/lib
EOF
# **************************************
ldconfig;
if [ "$SysBit" == '64' ] ; then
ln -s /usr/local/mysql/lib/mysql /usr/lib64/mysql;
else
ln -s /usr/local/mysql/lib/mysql /usr/lib/mysql;
fi;
chmod 775 /usr/local/mysql/support-files/mysql.server;
/usr/local/mysql/support-files/mysql.server start;
ln -s /usr/local/mysql/bin/mysql /usr/bin/mysql;
ln -s /usr/local/mysql/bin/mysqladmin /usr/bin/mysqladmin;
ln -s /usr/local/mysql/bin/mysqldump /usr/bin/mysqldump;
ln -s /usr/local/mysql/bin/myisamchk /usr/bin/myisamchk;
ln -s /usr/local/mysql/bin/mysqld_safe /usr/bin/mysqld_safe;
/usr/local/mysql/bin/mysqladmin password $MysqlPass;
rm -rf /usr/local/mysql/data/test;
# Harden: drop anonymous/remote accounts and (re)set the root password.
# EOF **********************************
mysql -hlocalhost -uroot -p$MysqlPass <<EOF
USE mysql;
DELETE FROM user WHERE User!='root' OR (User = 'root' AND Host != 'localhost');
UPDATE user set password=password('$MysqlPass') WHERE User='root';
DROP USER ''@'%';
FLUSH PRIVILEGES;
EOF
# **************************************
echo "[OK] ${Mariadb55Version} install completed.";
else
echo '[OK] MySQL is installed.';
fi;
else
# Not this flavor: chain to the next installer in the sequence.
InstallMariadb10;
fi;
}
# Build and install MariaDB 10.1 from source into /usr/local/mysql when
# DB choice '5' was selected; otherwise fall through to the PHP
# installer (end of the DB chain).  Same flow as the 5.5 installers.
function InstallMariadb10()
{
if [ "$confirm" == '5' ]; then
# [dir] /usr/local/mysql/
echo "[${Mariadb10Version} Installing] ************************************************** >>";
Downloadfile "${Mariadb10Version}.tar.gz" "http://mirrors.ustc.edu.cn/mariadb/${Mariadb10Version}/source/${Mariadb10Version}.tar.gz";
rm -rf $AMHDir/packages/untar/$Mariadb10Version;
echo "tar -zxf ${Mariadb10Version}.tar.gz ing...";
tar -zxf $AMHDir/packages/$Mariadb10Version.tar.gz -C $AMHDir/packages/untar;
# Skip the whole build if a mysql binary is already installed.
if [ ! -f /usr/local/mysql/bin/mysql ]; then
cd $AMHDir/packages/untar/$Mariadb10Version;
groupadd mysql;
useradd -s /sbin/nologin -g mysql mysql;
cmake -DCMAKE_INSTALL_PREFIX=/usr/local/mysql -DDEFAULT_CHARSET=utf8 -DDEFAULT_COLLATION=utf8_general_ci -DWITH_EXTRA_CHARSETS=complex -DWITH_READLINE=1 -DENABLED_LOCAL_INFILE=1;
#http://forge.mysql.com/wiki/Autotools_to_CMake_Transition_Guide
make -j $Cpunum;
make install;
chmod +w /usr/local/mysql;
chown -R mysql:mysql /usr/local/mysql;
mkdir -p /home/mysqldata;
chown -R mysql:mysql /home/mysqldata;
rm -f /etc/mysql/my.cnf /usr/local/mysql/etc/my.cnf;
cp $AMHDir/conf/my.cnf /etc/my.cnf;
cp $AMHDir/conf/mysql /root/amh/mysql;
chmod +x /root/amh/mysql;
/usr/local/mysql/scripts/mysql_install_db --user=mysql --defaults-file=/etc/my.cnf --basedir=/usr/local/mysql --datadir=/home/mysqldata;
# Register the MariaDB libs with the dynamic linker.
# EOF **********************************
cat > /etc/ld.so.conf.d/mysql.conf<<EOF
/usr/local/mysql/lib/mysql
/usr/local/lib
EOF
# **************************************
ldconfig;
if [ "$SysBit" == '64' ] ; then
ln -s /usr/local/mysql/lib/mysql /usr/lib64/mysql;
else
ln -s /usr/local/mysql/lib/mysql /usr/lib/mysql;
fi;
chmod 775 /usr/local/mysql/support-files/mysql.server;
/usr/local/mysql/support-files/mysql.server start;
ln -s /usr/local/mysql/bin/mysql /usr/bin/mysql;
ln -s /usr/local/mysql/bin/mysqladmin /usr/bin/mysqladmin;
ln -s /usr/local/mysql/bin/mysqldump /usr/bin/mysqldump;
ln -s /usr/local/mysql/bin/myisamchk /usr/bin/myisamchk;
ln -s /usr/local/mysql/bin/mysqld_safe /usr/bin/mysqld_safe;
/usr/local/mysql/bin/mysqladmin password $MysqlPass;
rm -rf /usr/local/mysql/data/test;
# Harden: drop anonymous/remote accounts and (re)set the root password.
# EOF **********************************
mysql -hlocalhost -uroot -p$MysqlPass <<EOF
USE mysql;
DELETE FROM user WHERE User!='root' OR (User = 'root' AND Host != 'localhost');
UPDATE user set password=password('$MysqlPass') WHERE User='root';
DROP USER ''@'%';
FLUSH PRIVILEGES;
EOF
# **************************************
echo "[OK] ${Mariadb10Version} install completed.";
else
echo '[OK] MySQL is installed.';
fi;
else
# End of the DB chain: continue with the main PHP build.
InstallPhp;
fi;
}
# Build the primary PHP (5.6) into /usr/local/php with php-fpm, create
# the www user/group, install AMH's php.ini / fpm config scaffolding
# and start php-fpm.  Skipped if /usr/local/php already exists.
function InstallPhp()
{
# [dir] /usr/local/php
echo "[${Php56Version} Installing] ************************************************** >>";
Downloadfile "${Php56Version}.tar.gz" "http://cn2.php.net/distributions/${Php56Version}.tar.gz";
rm -rf $AMHDir/packages/untar/$Php56Version;
echo "tar -zxf ${Php56Version}.tar.gz ing...";
tar -zxf $AMHDir/packages/$Php56Version.tar.gz -C $AMHDir/packages/untar;
if [ ! -d /usr/local/php ]; then
cd $AMHDir/packages/untar/$Php56Version;
groupadd www;
useradd -m -s /sbin/nologin -g www www;
# $PHPDisable may add --disable-fileinfo on low-memory machines.
if [ "$InstallModel" == '1' ]; then
./configure --prefix=/usr/local/php --enable-fpm --with-fpm-user=www --with-fpm-group=www --with-config-file-path=/etc --with-config-file-scan-dir=/etc/php.d --with-openssl --with-zlib --with-curl --enable-ftp --with-gd --with-jpeg-dir --with-png-dir --with-freetype-dir --enable-gd-native-ttf --enable-mbstring --enable-zip --with-iconv=/usr/local/libiconv --with-mysql=mysqlnd --with-mysqli=mysqlnd --with-pdo-mysql=mysqlnd --enable-opcache --without-pear $PHPDisable;
fi;
make -j $Cpunum;
make install;
cp $AMHDir/conf/php.ini /etc/php.ini;
cp $AMHDir/conf/php /root/amh/php;
cp $AMHDir/conf/phpver /root/amh/phpver;
mkdir -p /root/amh/fpm/sites;
mkdir -p /root/amh/sitesconf;
cp $AMHDir/conf/php-fpm.conf /usr/local/php/etc/php-fpm.conf;
cp $AMHDir/conf/php-fpm-template.conf /root/amh/fpm/php-fpm-template.conf;
chmod +x /root/amh/php;
chmod +x /root/amh/phpver;
mkdir /etc/php.d;
mkdir /usr/local/php/etc/fpm;
mkdir /usr/local/php/var/run/pid;
#mkdir -p /var/run/pid;
touch /usr/local/php/etc/fpm/amh.conf;
/usr/local/php/sbin/php-fpm;
ln -s /usr/local/php/bin/php /usr/bin/php;
ln -s /usr/local/php/bin/phpize /usr/bin/phpize;
ln -s /usr/local/php/sbin/php-fpm /usr/bin/php-fpm;
echo "[OK] ${Php56Version} install completed.";
else
echo '[OK] PHP is installed.';
fi;
}
# Build PHP 5.3 as a secondary interpreter into /usr/local/php5.3
# (FPM-enabled, config scanned from /etc/php.d/5.3).  Skipped if the
# prefix already exists.
function InstallPhp53()
{
# [dir] /usr/local/php5.3
echo "[${Php53Version} Installing] ************************************************** >>";
Downloadfile "${Php53Version}.tar.gz" "http://cn2.php.net/distributions/${Php53Version}.tar.gz";
rm -rf $AMHDir/packages/untar/$Php53Version;
# Bug fix: this progress message printed ${Php56Version} instead of the
# 5.3 tarball actually being extracted.
echo "tar -zxf ${Php53Version}.tar.gz ing...";
tar -zxf $AMHDir/packages/$Php53Version.tar.gz -C $AMHDir/packages/untar;
if [ ! -d /usr/local/php5.3 ]; then
cd $AMHDir/packages/untar/$Php53Version;
if [ "$InstallModel" == '1' ]; then
./configure --prefix=/usr/local/php5.3 --enable-fpm --with-fpm-user=www --with-fpm-group=www --with-config-file-path=/usr/local/php5.3/etc --with-config-file-scan-dir=/etc/php.d/5.3 --with-openssl --with-zlib --with-curl --enable-ftp --with-gd --with-jpeg-dir --with-png-dir --with-freetype-dir --enable-gd-native-ttf --enable-mbstring --enable-zip --with-iconv=/usr/local/libiconv --with-mysql=mysqlnd --with-mysqli=mysqlnd --with-pdo-mysql=mysqlnd --without-pear $PHPDisable;
fi;
make -j $Cpunum;
make install;
cp $AMHDir/conf/php53.ini /usr/local/php5.3/etc/php.ini;
mkdir -p /etc/php.d/5.3;
echo "[OK] ${Php53Version} install completed.";
else
echo '[OK] PHP5.3 is installed.';
fi;
}
# Build PHP 5.4 as a secondary interpreter into /usr/local/php5.4
# (FPM-enabled, config scanned from /etc/php.d/5.4).  Skipped if the
# prefix already exists.
function InstallPhp54()
{
# [dir] /usr/local/php5.4
echo "[${Php54Version} Installing] ************************************************** >>";
Downloadfile "${Php54Version}.tar.gz" "http://cn2.php.net/distributions/${Php54Version}.tar.gz";
rm -rf $AMHDir/packages/untar/$Php54Version;
echo "tar -zxf ${Php54Version}.tar.gz ing...";
tar -zxf $AMHDir/packages/$Php54Version.tar.gz -C $AMHDir/packages/untar;
if [ ! -d /usr/local/php5.4 ]; then
cd $AMHDir/packages/untar/$Php54Version;
if [ "$InstallModel" == '1' ]; then
./configure --prefix=/usr/local/php5.4 --enable-fpm --with-fpm-user=www --with-fpm-group=www --with-config-file-path=/usr/local/php5.4/etc --with-config-file-scan-dir=/etc/php.d/5.4 --with-openssl --with-zlib --with-curl --enable-ftp --with-gd --with-jpeg-dir --with-png-dir --with-freetype-dir --enable-gd-native-ttf --enable-mbstring --enable-zip --with-iconv=/usr/local/libiconv --with-mysql=mysqlnd --with-mysqli=mysqlnd --with-pdo-mysql=mysqlnd --without-pear $PHPDisable;
fi;
make -j $Cpunum;
make install;
cp $AMHDir/conf/php54.ini /usr/local/php5.4/etc/php.ini;
mkdir -p /etc/php.d/5.4;
echo "[OK] ${Php54Version} install completed.";
else
echo '[OK] PHP5.4 is installed.';
fi;
}
# Build PHP 5.5 as a secondary interpreter into /usr/local/php5.5
# (FPM-enabled, opcache on, config scanned from /etc/php.d/5.5).
# Skipped if the prefix already exists.
function InstallPhp55()
{
# [dir] /usr/local/php5.5
echo "[${Php55Version} Installing] ************************************************** >>";
Downloadfile "${Php55Version}.tar.gz" "http://cn2.php.net/distributions/${Php55Version}.tar.gz";
rm -rf $AMHDir/packages/untar/$Php55Version;
echo "tar -zxf ${Php55Version}.tar.gz ing...";
tar -zxf $AMHDir/packages/$Php55Version.tar.gz -C $AMHDir/packages/untar;
if [ ! -d /usr/local/php5.5 ]; then
cd $AMHDir/packages/untar/$Php55Version;
if [ "$InstallModel" == '1' ]; then
./configure --prefix=/usr/local/php5.5 --enable-fpm --with-fpm-user=www --with-fpm-group=www --with-config-file-path=/usr/local/php5.5/etc --with-config-file-scan-dir=/etc/php.d/5.5 --with-openssl --with-zlib --with-curl --enable-ftp --with-gd --with-jpeg-dir --with-png-dir --with-freetype-dir --enable-gd-native-ttf --enable-mbstring --enable-zip --with-iconv=/usr/local/libiconv --with-mysql=mysqlnd --with-mysqli=mysqlnd --with-pdo-mysql=mysqlnd --enable-opcache --without-pear $PHPDisable;
fi;
make -j $Cpunum;
make install;
cp $AMHDir/conf/php55.ini /usr/local/php5.5/etc/php.ini;
mkdir -p /etc/php.d/5.5;
echo "[OK] ${Php55Version} install completed.";
else
echo '[OK] PHP5.5 is installed.';
fi;
}
# Build PHP 7.0 as a secondary interpreter into /usr/local/php7.0
# (FPM-enabled, opcache on, config scanned from /etc/php.d/7.0).
# Skipped if the prefix already exists.
function InstallPhp70()
{
# [dir] /usr/local/php7.0
echo "[${Php70Version} Installing] ************************************************** >>";
Downloadfile "${Php70Version}.tar.gz" "http://cn2.php.net/distributions/${Php70Version}.tar.gz";
rm -rf $AMHDir/packages/untar/$Php70Version;
echo "tar -zxf ${Php70Version}.tar.gz ing...";
tar -zxf $AMHDir/packages/$Php70Version.tar.gz -C $AMHDir/packages/untar;
if [ ! -d /usr/local/php7.0 ]; then
cd $AMHDir/packages/untar/$Php70Version;
if [ "$InstallModel" == '1' ]; then
./configure --prefix=/usr/local/php7.0 --enable-fpm --with-fpm-user=www --with-fpm-group=www --with-config-file-path=/usr/local/php7.0/etc --with-config-file-scan-dir=/etc/php.d/7.0 --with-openssl --with-zlib --with-curl --enable-ftp --with-gd --with-jpeg-dir --with-png-dir --with-freetype-dir --enable-gd-native-ttf --enable-mbstring --enable-zip --with-iconv=/usr/local/libiconv --with-mysql=mysqlnd --with-mysqli=mysqlnd --with-pdo-mysql=mysqlnd --enable-opcache --without-pear $PHPDisable;
fi;
make -j $Cpunum;
make install;
cp $AMHDir/conf/php70.ini /usr/local/php7.0/etc/php.ini;
mkdir -p /etc/php.d/7.0;
echo "[OK] ${Php70Version} install completed.";
else
echo '[OK] PHP7.0 is installed.';
fi;
}
function InstallNginx()
{
# Build and install Nginx (with the ngx_cache_purge module and a statically
# configured OpenSSL) into /usr/local/nginx, lay out the AMH vhost/proxy
# directory structure, then start nginx and php-fpm.
# Globals (read): NginxVersion, NginxCachePurgeVersion, OpenSSLVersion,
#                 AMHDir, Cpunum, SysBit, Domain.
# Skips the whole build if /usr/local/nginx already exists.
# [dir] /usr/local/nginx
echo "[${NginxVersion} Installing] ************************************************** >>";
Downloadfile "${NginxVersion}.tar.gz" "http://soft.aeink.com/amh/files/${NginxVersion}.tar.gz";
Downloadfile "${NginxCachePurgeVersion}.tar.gz" "http://soft.aeink.com/amh/files/${NginxCachePurgeVersion}.tar.gz";
rm -rf $AMHDir/packages/untar/$NginxVersion;
echo "tar -zxf ${NginxVersion}.tar.gz ing...";
tar -zxf $AMHDir/packages/$NginxVersion.tar.gz -C $AMHDir/packages/untar;
echo "tar -zxf ${NginxCachePurgeVersion}.tar.gz ing...";
tar -zxf $AMHDir/packages/$NginxCachePurgeVersion.tar.gz -C $AMHDir/packages/untar;
if [ ! -d /usr/local/nginx ]; then
cd $AMHDir/packages/untar/$NginxVersion;
./configure --prefix=/usr/local/nginx --user=www --group=www --with-http_ssl_module --with-http_gzip_static_module --without-mail_pop3_module --without-mail_imap_module --without-mail_smtp_module --without-http_uwsgi_module --without-http_scgi_module --with-ipv6 --with-stream --with-http_v2_module --with-openssl=/usr/local/openssl-1.0.2d --add-module=$AMHDir/packages/untar/$NginxCachePurgeVersion;
# NOTE(review): --with-openssl above hard-codes /usr/local/openssl-1.0.2d
# while the tree configured here is /usr/local/$OpenSSLVersion — confirm
# both names refer to the same directory.
cd /usr/local/$OpenSSLVersion;
./config;
cd $AMHDir/packages/untar/$NginxVersion;
make -j $Cpunum;
make install;
mkdir -p /home/proxyroot/proxy_temp_dir;
mkdir -p /home/proxyroot/proxy_cache_dir;
chown www.www -R /home/proxyroot/proxy_temp_dir /home/proxyroot/proxy_cache_dir;
chmod -R 644 /home/proxyroot/proxy_temp_dir /home/proxyroot/proxy_cache_dir;
mkdir -p /home/wwwroot/index /home/backup /usr/local/nginx/conf/vhost/ /usr/local/nginx/conf/vhost_stop/ /usr/local/nginx/conf/rewrite/;
# Fix: the original ran 'chown +w ...', but '+w' is a permission mode, not
# an owner spec, so chown rejects it — chmod is the command that grants
# write permission.
chmod +w /home/wwwroot/index;
touch /usr/local/nginx/conf/rewrite/amh.conf;
cp $AMHDir/conf/proxy.conf /usr/local/nginx/conf/proxy.conf;
cp $AMHDir/conf/nginx.conf /usr/local/nginx/conf/nginx.conf;
cp $AMHDir/conf/nginx-host.conf /usr/local/nginx/conf/nginx-host.conf;
cp $AMHDir/conf/fcgi.conf /usr/local/nginx/conf/fcgi.conf;
cp $AMHDir/conf/fcgi-host.conf /usr/local/nginx/conf/fcgi-host.conf;
cp $AMHDir/conf/nginx /root/amh/nginx;
cp $AMHDir/conf/host /root/amh/host;
chmod +x /root/amh/nginx;
chmod +x /root/amh/host;
# Replace the placeholder server name in the shipped config with the
# user-supplied domain.
sed -i 's/www.amysql.com/'$Domain'/g' /usr/local/nginx/conf/nginx.conf;
cd /home/wwwroot/index;
mkdir -p tmp etc/rsa bin usr/sbin log;
touch etc/upgrade.conf;
chown mysql:mysql etc/rsa;
chmod 777 tmp;
[ "$SysBit" == '64' ] && mkdir lib64 || mkdir lib;
/usr/local/nginx/sbin/nginx;
/usr/local/php/sbin/php-fpm;
ln -s /usr/local/nginx/sbin/nginx /usr/bin/nginx;
echo "[OK] ${NginxVersion} install completed.";
else
echo '[OK] Nginx is installed.';
fi;
}
function InstallPureFTPd()
{
# Build and install Pure-FTPd, create the ftpgroup/ftpuser system account for
# virtual FTP users, and open the needed ports in iptables.
# Globals (read): PureFTPdVersion, AMHDir, Cpunum.
# Skips the build if /etc/pure-ftpd.conf already exists.
# [dir] /etc/ /usr/local/bin /usr/local/sbin
echo "[${PureFTPdVersion} Installing] ************************************************** >>";
Downloadfile "${PureFTPdVersion}.tar.gz" "http://soft.aeink.com/amh/files/${PureFTPdVersion}.tar.gz";
rm -rf $AMHDir/packages/untar/$PureFTPdVersion;
echo "tar -zxf ${PureFTPdVersion}.tar.gz ing...";
tar -zxf $AMHDir/packages/$PureFTPdVersion.tar.gz -C $AMHDir/packages/untar;
if [ ! -f /etc/pure-ftpd.conf ]; then
cd $AMHDir/packages/untar/$PureFTPdVersion;
./configure --with-puredb --with-quotas --with-throttling --with-ratios --with-peruserlimits;
make -j $Cpunum;
make install;
cp contrib/redhat.init /usr/local/sbin/redhat.init;
chmod 755 /usr/local/sbin/redhat.init;
cp $AMHDir/conf/pure-ftpd.conf /etc;
cp configuration-file/pure-config.pl /usr/local/sbin/pure-config.pl;
chmod 744 /etc/pure-ftpd.conf;
chmod 755 /usr/local/sbin/pure-config.pl;
/usr/local/sbin/redhat.init start;
# All virtual FTP users are mapped onto this single no-login system account.
groupadd ftpgroup;
useradd -d /home/wwwroot/ -s /sbin/nologin -g ftpgroup ftpuser;
cp $AMHDir/conf/ftp /root/amh/ftp;
chmod +x /root/amh/ftp;
# De-duplicate any existing ACCEPT rules for these ports, then re-add them:
# 21 (FTP), 80 (HTTP), 8888 (AMH panel), 10100-10110 (FTP passive range).
/sbin/iptables-save > /etc/amh-iptables;
sed -i '/--dport 21 -j ACCEPT/d' /etc/amh-iptables;
sed -i '/--dport 80 -j ACCEPT/d' /etc/amh-iptables;
sed -i '/--dport 8888 -j ACCEPT/d' /etc/amh-iptables;
sed -i '/--dport 10100:10110 -j ACCEPT/d' /etc/amh-iptables;
/sbin/iptables-restore < /etc/amh-iptables;
/sbin/iptables -I INPUT -p tcp --dport 21 -j ACCEPT;
/sbin/iptables -I INPUT -p tcp --dport 80 -j ACCEPT;
/sbin/iptables -I INPUT -p tcp --dport 8888 -j ACCEPT;
/sbin/iptables -I INPUT -p tcp --dport 10100:10110 -j ACCEPT;
/sbin/iptables-save > /etc/amh-iptables;
# Load the FTP connection-tracking kernel module on firewall start.
echo 'IPTABLES_MODULES="ip_conntrack_ftp"' >>/etc/sysconfig/iptables-config;
touch /etc/pureftpd.passwd;
chmod 774 /etc/pureftpd.passwd;
echo "[OK] ${PureFTPdVersion} install completed.";
else
echo '[OK] PureFTPd is installed.';
fi;
}
function InstallAMH()
{
# Install the AMH web panel into /home/wwwroot/index/web, build the setuid
# 'amh' CLI wrapper, copy the management sub-commands to /root/amh, inject
# the chosen passwords into the config/SQL files, and load the schema.
# Globals (read): AMHVersion, AMHDir, MysqlPass, AMHPass.
# Skips everything if /home/wwwroot/index/web already exists.
# [dir] /home/wwwroot/index/web
echo "[${AMHVersion} Installing] ************************************************** >>";
Downloadfile "${AMHVersion}.tar.gz" "http://soft.aeink.com/amh/files/${AMHVersion}.tar.gz";
rm -rf $AMHDir/packages/untar/$AMHVersion;
echo "tar -xf ${AMHVersion}.tar.gz ing...";
tar -xf $AMHDir/packages/$AMHVersion.tar.gz -C $AMHDir/packages/untar;
if [ ! -d /home/wwwroot/index/web ]; then
cp -r $AMHDir/packages/untar/$AMHVersion /home/wwwroot/index/web;
# Mode 4775 sets the setuid bit so 'amh' runs with the file owner's rights.
gcc -o /bin/amh -Wall $AMHDir/conf/amh.c;
chmod 4775 /bin/amh;
cp -a $AMHDir/conf/amh-backup.conf /home/wwwroot/index/etc;
cp -a $AMHDir/conf/html /home/wwwroot/index/etc;
cp $AMHDir/conf/{all,backup,revert,BRssh,BRftp,info,SetParam,module,crontab,upgrade} /root/amh;
cp -a $AMHDir/conf/modules /root/amh;
chmod +x /root/amh/all /root/amh/backup /root/amh/revert /root/amh/BRssh /root/amh/BRftp /root/amh/info /root/amh/SetParam /root/amh/module /root/amh/crontab /root/amh/upgrade;
# Escape '&' and single quotes so the password survives being used as the
# sed replacement text below.
SedMysqlPass=${MysqlPass//&/\\\&};
SedMysqlPass=${SedMysqlPass//\'/\\\\\'};
sed -i "s/'MysqlPass'/'${SedMysqlPass}'/g" /home/wwwroot/index/web/Amysql/Config.php;
chown www:www /home/wwwroot/index/web/Amysql/Config.php;
# Same escaping for the panel password (heavier quoting: the target here is
# a SQL file, so single quotes need an extra level of escaping).
SedAMHPass=${AMHPass//&/\\\&};
SedAMHPass=${SedAMHPass//\'/\\\\\\\\\'\'};
sed -i "s/'AMHPass_amysql-amh'/'${SedAMHPass}_amysql-amh'/g" $AMHDir/conf/amh.sql;
/usr/local/mysql/bin/mysql -u root -p$MysqlPass < $AMHDir/conf/amh.sql;
echo "[OK] ${AMHVersion} install completed.";
else
echo '[OK] AMH is installed.';
fi;
}
function InstallAMS()
{
# Install the AMS module store into the AMH panel's web root.
# Globals (read): AMSVersion, AMHDir.
# Skips everything if /home/wwwroot/index/web/ams already exists.
# [dir] /home/wwwroot/index/web/ams
echo "[${AMSVersion} Installing] ************************************************** >>";
Downloadfile "${AMSVersion}.tar.gz" "http://soft.aeink.com/amh/files/${AMSVersion}.tar.gz";
rm -rf $AMHDir/packages/untar/$AMSVersion;
echo "tar -xf ${AMSVersion}.tar.gz ing...";
tar -xf $AMHDir/packages/$AMSVersion.tar.gz -C $AMHDir/packages/untar;
if [ ! -d /home/wwwroot/index/web/ams ]; then
cp -r $AMHDir/packages/untar/$AMSVersion /home/wwwroot/index/web/ams;
# The web server user must be able to write AMS's data directory.
chown www:www -R /home/wwwroot/index/web/ams/View/DataFile;
echo "[OK] ${AMSVersion} install completed.";
else
echo '[OK] AMS is installed.';
fi;
}
# AMH Installing ****************************************************************************
# Top-level install sequence: gather user input, install every component in
# dependency order, then register the amh-start init script and print the
# final summary with credentials and management commands.
CheckSystem;
ConfirmInstall;
InputDomain;
InputMysqlPass;
InputAMHPass;
Timezone;
CloseSelinux;
DeletePackages;
InstallBasePackages;
InstallReady;
InstallLibiconv;
#InstallMysql;
InstallMysql55;
InstallMysql56;
InstallMysql57;
InstallMariadb55;
InstallMariadb10;
InstallPhp;
# Optional extra PHP versions, chosen interactively earlier.
[ "$confirm53" == 'y' ] && InstallPhp53;
[ "$confirm54" == 'y' ] && InstallPhp54;
[ "$confirm55" == 'y' ] && InstallPhp55;
[ "$confirm70" == 'y' ] && InstallPhp70;
InstallOpenSSL;
InstallNginx;
InstallPureFTPd;
InstallAMH;
InstallAMS;
# NOTE(review): '-s' tests "exists and has a size greater than zero"; for
# these install directories '-d' would state the intent more directly.
if [ -s /usr/local/nginx ] && [ -s /usr/local/php ] && [ -s /usr/local/mysql ]; then
cp $AMHDir/conf/amh-start /etc/init.d/amh-start;
chmod 775 /etc/init.d/amh-start;
# Register the boot service with the distro-appropriate tool.
if [ "$SysName" == 'centos' ]; then
chkconfig --add amh-start;
chkconfig amh-start on;
else
update-rc.d -f amh-start defaults;
fi;
/etc/init.d/amh-start;
rm -rf $AMHDir;
echo '================================================================';
echo '[AMH] Congratulations, AMH 4.2 install completed.';
echo "AMH Management: http://${Domain}:8888";
echo 'User:admin';
echo "Password:${AMHPass}";
echo "MySQL Password:${MysqlPass}";
echo '';
echo '******* SSH Management *******';
echo 'Host: amh host';
echo 'PHP: amh php';
echo 'Nginx: amh nginx';
echo 'MySQL: amh mysql';
echo 'FTP: amh ftp';
echo 'Backup: amh backup';
echo 'Revert: amh revert';
echo 'SetParam: amh SetParam';
echo 'Module : amh module';
echo 'Crontab : amh crontab';
echo 'Upgrade : amh upgrade';
echo 'Info: amh info';
echo '';
echo '******* SSH Dirs *******';
echo 'WebSite: /home/wwwroot';
echo 'Nginx: /usr/local/nginx';
echo 'PHP: /usr/local/php';
echo 'MySQL: /usr/local/mysql';
echo 'MySQL-Data: /home/mysqldata';
echo '';
echo "Start time: ${StartDate}";
# Use POSIX $(( )) arithmetic instead of the deprecated $[ ] form.
echo "Completion time: $(date) (Use: $(( ($(date +%s)-StartDateSecond)/60 )) minute)";
echo 'More help please visit:http://amysql.com';
echo '================================================================';
else
echo 'Sorry, Failed to install AMH';
echo 'Please contact us: http://amysql.com';
fi;
|
lulidong2016/kloxo
|
amh1.sh
|
Shell
|
apache-2.0
| 40,985 |
BootstrapSuseCommon() {
# Install certbot's build dependencies with zypper on SUSE/SLE systems.
# Globals (read): ASSUME_YES, SUDO.
# When ASSUME_YES=1, run zypper non-interactively and quietly (-nq) and let
# the install auto-agree to license prompts (-l).
# SLE12 don't have python-virtualenv
if [ "$ASSUME_YES" = 1 ]; then
zypper_flags="-nq"
install_flags="-l"
fi
# zypper_flags/install_flags are intentionally unquoted: when unset they
# expand to nothing rather than an empty argument.
$SUDO zypper $zypper_flags in $install_flags \
python \
python-devel \
python-virtualenv \
gcc \
dialog \
augeas-lenses \
libopenssl-devel \
libffi-devel \
ca-certificates
}
|
jtl999/certbot
|
letsencrypt-auto-source/pieces/bootstrappers/suse_common.sh
|
Shell
|
apache-2.0
| 361 |
#!/bin/bash
# Use this script to delete Eclipse related files from the cog
# Must be run from the cog directory
# This is unsupported
# NOTE(review): 'exclude' is never referenced below — presumably a leftover;
# confirm before removing.
exclude="template qos portlets"
rm -rf .build
rm -f .project
rm -f .classpath
# Quote "$module" so module directories whose names contain spaces or glob
# characters are handled safely (SC2086).
for module in modules/*; do
rm -rf "$module"/.build
rm -f "$module"/.project
rm -f "$module"/.classpath
done
|
swift-lang/swift-k
|
cogkit/tools/cleaneclipsedata.sh
|
Shell
|
apache-2.0
| 320 |
#!/bin/bash
# CI deploy script: builds the docs JSON and webpack bundles and pushes them
# to the 'docs' and 'webpack' branches of the same repository.
# Expects Travis env vars (TRAVIS_BRANCH, TRAVIS_TAG, TRAVIS_PULL_REQUEST,
# TRAVIS_NODE_VERSION, ENCRYPTION_LABEL, COMMIT_AUTHOR_EMAIL).
# Adapted from https://gist.github.com/domenic/ec8b0fc8ab45f39403dd.
set -e
# For revert branches, do nothing
if [[ "$TRAVIS_BRANCH" == revert-* ]]; then
echo -e "\e[36m\e[1mBuild triggered for reversion branch \"${TRAVIS_BRANCH}\" - doing nothing."
exit 0
fi
# For PRs, do nothing
if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
echo -e "\e[36m\e[1mBuild triggered for PR #${TRAVIS_PULL_REQUEST} to branch \"${TRAVIS_BRANCH}\" - doing nothing."
exit 0
fi
# Figure out the source of the build
if [ -n "$TRAVIS_TAG" ]; then
echo -e "\e[36m\e[1mBuild triggered for tag \"${TRAVIS_TAG}\"."
SOURCE=$TRAVIS_TAG
SOURCE_TYPE="tag"
else
echo -e "\e[36m\e[1mBuild triggered for branch \"${TRAVIS_BRANCH}\"."
SOURCE=$TRAVIS_BRANCH
SOURCE_TYPE="branch"
fi
# For Node != 8, do nothing (deploy from exactly one job of the build matrix)
if [ "$TRAVIS_NODE_VERSION" != "8" ]; then
echo -e "\e[36m\e[1mBuild triggered with Node v${TRAVIS_NODE_VERSION} - doing nothing."
exit 0
fi
# Run the build
npm run docs
VERSIONED=false npm run webpack
# Initialise some useful variables
REPO=`git config remote.origin.url`
SSH_REPO=${REPO/https:\/\/github.com\//[email protected]:}
SHA=`git rev-parse --verify HEAD`
# Decrypt and add the ssh key (Travis stores the key/iv in env vars named
# after the encryption label; ${!var} reads them indirectly).
ENCRYPTED_KEY_VAR="encrypted_${ENCRYPTION_LABEL}_key"
ENCRYPTED_IV_VAR="encrypted_${ENCRYPTION_LABEL}_iv"
ENCRYPTED_KEY=${!ENCRYPTED_KEY_VAR}
ENCRYPTED_IV=${!ENCRYPTED_IV_VAR}
openssl aes-256-cbc -K $ENCRYPTED_KEY -iv $ENCRYPTED_IV -in travis/deploy-key.enc -out deploy-key -d
chmod 600 deploy-key
eval `ssh-agent -s`
ssh-add deploy-key
# Checkout the repo in the target branch so we can build docs and push to it
TARGET_BRANCH="docs"
git clone $REPO out -b $TARGET_BRANCH
# Move the generated JSON file to the newly-checked-out repo, to be committed and pushed
mv docs/docs.json out/$SOURCE.json
# Commit and push ('|| true' keeps the script alive when there is nothing to
# commit; push over SSH using the key added above)
cd out
git add .
git config user.name "Travis CI"
git config user.email "$COMMIT_AUTHOR_EMAIL"
git commit -m "Docs build for ${SOURCE_TYPE} ${SOURCE}: ${SHA}" || true
git push $SSH_REPO $TARGET_BRANCH
# Clean up...
cd ..
rm -rf out
# ...then do the same once more for the webpack
TARGET_BRANCH="webpack"
git clone $REPO out -b $TARGET_BRANCH
# Move the generated webpack over
mv webpack/discord.js out/discord.$SOURCE.js
mv webpack/discord.min.js out/discord.$SOURCE.min.js
# Commit and push
cd out
git add .
git config user.name "Travis CI"
git config user.email "$COMMIT_AUTHOR_EMAIL"
git commit -m "Webpack build for ${SOURCE_TYPE} ${SOURCE}: ${SHA}" || true
git push $SSH_REPO $TARGET_BRANCH
|
zajrik/discord.js
|
travis/deploy.sh
|
Shell
|
apache-2.0
| 2,527 |
#!/bin/bash
# Environment settings shared by the DBFlute helper scripts in this project.
# Cap Ant's JVM heap at 512 MB.
export ANT_OPTS=-Xmx512m
# Location of the DBFlute engine, relative to this project directory.
export DBFLUTE_HOME=../mydbflute/dbflute-1.x
# Properties file consumed by the DBFlute tasks.
export MY_PROPERTIES_PATH=build.properties
|
dbflute-test/dbflute-test-lang-go
|
dbflute_maihamadb/_project.sh
|
Shell
|
apache-2.0
| 128 |
#!/bin/sh
#
# This source file is part of the FabSim software toolkit, which is distributed under the BSD 3-Clause license.
# Please refer to LICENSE for detailed information regarding the licensing.
#
# Legacy implementation of the Iterative Boltzmann Inversion.
#
# Positional arguments:
#   $1 - directory with the atomistic reference data (RDFs, LAMMPS data file)
#   $2 - iteration number
#   $3 - pressure flag passed through to IBI.py
#   $4 - input directory (previous iteration's files)
#   $5 - output directory for the new LAMMPS input
in="$4" #../python
atom="$1" #../python
out="$5" #../python
ibi_script_dir=../python
i="$2"
pres="$3"
lammps_data_file=lammps_data_file
# All expansions are quoted so paths containing spaces survive word
# splitting (SC2086).
python "$ibi_script_dir/IBI.py" lammps_input_file "$in/in.CG.lammps" lammps_output_file "$out/in.CG.lammps" LAMMPS_data_file "$atom/$lammps_data_file" lammps_rdf_file "$in/tmp.$i.rdf" correct_rdf_base "$atom/rdf" potential_base "$in/pot.$i.new" number "$i" CG_logfile "$in/new_CG.prod$i.log" pressure_flag "$pres"
|
UCL-CCS/FabSim
|
blackbox/ibi.sh
|
Shell
|
bsd-3-clause
| 688 |
# Build and run the Windows test program with MSVC. Run from a Unix-style
# shell on Windows (MSYS/Cygwin), hence rm/./main.exe alongside cl/link.
rm -f main.exe main.obj
# Compile only (/c), picking up headers from the builder's include tree.
cl /c "/IC:\\Documents and Settings\\builder\\usr\\include" main.c
# Link the object against the ndarray library into main.exe.
link /nologo /OUT:main.exe \
"/LIBPATH:C:\\Documents and Settings\\builder\\usr\\lib" \
main.obj ndarray.lib
./main.exe
|
numpy/numpy-refactor
|
libndarray/windows/test/do.sh
|
Shell
|
bsd-3-clause
| 222 |
#!/bin/sh
# Package post-install hook: pre-create vaban's access log so the service,
# running as user 'vaban', can write to it immediately after install.
touch /var/log/vaban_access_log
chown vaban:vaban /var/log/vaban_access_log
|
marcomoscardini/vaban
|
packaging/postinstall.sh
|
Shell
|
mit
| 86 |
#!/bin/bash
# Conda post-link hook: download the MeSH.Syn.eg.db Bioconductor annotation
# tarball from the first mirror whose md5 checksum matches, then install it
# into the environment's R library and clean up the staging area.
FN="MeSH.Syn.eg.db_1.12.0.tar.gz"
URLS=(
  "https://bioconductor.org/packages/3.9/data/annotation/src/contrib/MeSH.Syn.eg.db_1.12.0.tar.gz"
  "https://bioarchive.galaxyproject.org/MeSH.Syn.eg.db_1.12.0.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-mesh.syn.eg.db/bioconductor-mesh.syn.eg.db_1.12.0_src_all.tar.gz"
)
MD5="be1717480eabc1ace26196d2608c39f5"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING="$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM"
mkdir -p "$STAGING"
TARBALL="$STAGING/$FN"
SUCCESS=0
for URL in "${URLS[@]}"; do
  # Try the next mirror if the download itself fails.
  curl "$URL" > "$TARBALL" || continue
  # Platform-specific md5sum checks.
  if [[ $(uname -s) == "Linux" ]]; then
    if md5sum -c <<<"$MD5 $TARBALL"; then
      SUCCESS=1
      break
    fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 "$TARBALL" | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done
if [[ $SUCCESS != 1 ]]; then
  echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
  printf '%s\n' "${URLS[@]}"
  exit 1
fi
# Install and clean up
R CMD INSTALL --library="$PREFIX/lib/R/library" "$TARBALL"
rm "$TARBALL"
rmdir "$STAGING"
|
mdehollander/bioconda-recipes
|
recipes/bioconductor-mesh.syn.eg.db/post-link.sh
|
Shell
|
mit
| 1,321 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.